diff --git a/chains/tendermint_34/conns/conn_go110.go b/chains/tendermint_34/conns/conn_go110.go new file mode 100755 index 0000000..6821881 --- /dev/null +++ b/chains/tendermint_34/conns/conn_go110.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package conn + +// Go1.10 has a proper net.Conn implementation that +// has the SetDeadline method implemented as per +// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// lest we run into problems like +// https://github.com/tendermint/tendermint/issues/851 + +import "net" + +func NetPipe() (net.Conn, net.Conn) { + return net.Pipe() +} diff --git a/chains/tendermint_34/conns/testdata/TestDeriveSecretsAndChallengeGolden.golden b/chains/tendermint_34/conns/testdata/TestDeriveSecretsAndChallengeGolden.golden new file mode 100755 index 0000000..eb69b29 --- /dev/null +++ b/chains/tendermint_34/conns/testdata/TestDeriveSecretsAndChallengeGolden.golden @@ -0,0 +1,32 @@ +9fe4a5a73df12dbd8659b1d9280873fe993caefec6b0ebc2686dd65027148e03,true,80a83ad6afcb6f8175192e41973aed31dd75e3c106f813d986d9567a4865eb2f,96362a04f628a0666d9866147326898bb0847b8db8680263ad19e6336d4eed9e,2632c3fd20f456c5383ed16aa1d56dc7875a2b0fc0d5ff053c3ada8934098c69 +0716764b370d543fee692af03832c16410f0a56e4ddb79604ea093b10bb6f654,false,84f2b1e8658456529a2c324f46c3406c3c6fecd5fbbf9169f60bed8956a8b03d,cba357ae33d7234520d5742102a2a6cdb39b7db59c14a58fa8aadd310127630f,576643a8fcc1a4cf866db900f4a150dbe35d44a1b3ff36e4911565c3fa22fc32 +358dd73aae2c5b7b94b57f950408a3c681e748777ecab2063c8ca51a63588fa8,false,c2e2f664c8ee561af8e1e30553373be4ae23edecc8c6bd762d44b2afb7f2a037,d1563f428ac1c023c15d8082b2503157fe9ecbde4fb3493edd69ebc299b4970c,89fb6c6439b12fe11a4c604b8ad883f7dc76be33df590818fe5eb15ddb01face 
+0958308bdb583e639dd399a98cd21077d834b4b5e30771275a5a73a62efcc7e0,false,523c0ae97039173566f7ab4b8f271d8d78feef5a432d618e58ced4f80f7c1696,c1b743401c6e4508e62b8245ea7c3252bbad082e10af10e80608084d63877977,d7c52adf12ebc69677aec4bd387b0c5a35570fe61cb7b8ae55f3ab14b1b79be0 +d93d134e72f58f177642ac30f36b2d3cd4720aa7e60feb1296411a9009cf4524,false,47a427bcc1ef6f0ce31dbf343bc8bbf49554b4dd1e2330fd97d0df23ecdbba10,73e23adb7801179349ecf9c8cdf64d71d64a9f1145ba6730e5d029f99eaf8840,a8fdcb77f591bfba7b8483aa15ae7b42054ba68625d51dec005896dfe910281f +6104474c791cda24d952b356fb41a5d273c0ce6cc87d270b1701d0523cd5aa13,true,1cb4397b9e478430321af4647da2ccbef62ff8888542d31cca3f626766c8080f,673b23318826bd31ad1a4995c6e5095c4b092f5598aa0a96381a3e977bc0eaf9,4a25a25c5f75d6cc512f2ba8c1546e6263e9ef8269f0c046c37838cc66aa83e6 +8a6002503c15cab763e27c53fc449f6854a210c95cdd67e4466b0f2cb46b629c,false,f01ff06aef356c87f8d2646ff9ed8b855497c2ca00ea330661d84ef421a67e63,4f59bb23090010614877265a1597f1a142fa97b7208e1d554435763505f36f6a,1aadcb1c8b5993da102cebcb60c545b03197c98137064530840f45d917ad300e +31a57c6b1fe33beb1f7ebbbfc06d58c4f307cd355b6f9753e58f3edec16c7559,false,13e126c4cb240349dccf0dc843977671d34a1daffd0517d06ed66b703344db22,d491431906a306af45ecf9f1977e32d7f65a79f5139f931760416de27554b687,5ea7e8e3d5a30503423341609d360d246b61a9159fc07f253a46e357977cd745 +71a3c79718b824627faeefdce887d9465b353bd962cc5e97c5b5dfedab457ef9,true,e2e8eea547dcee7eafa89ae41f48ab049beac24935fad75258924fd5273d23cb,45d2e839bf36a3616cbe8a9bdbd4e7b288bf5bf1e6e79c07995eb2b18eb2eaff,7ee50e0810bc9f98e56bc46de5da22d84b3efa52fe5d85db4b2344530ef17ed8 +2e9dba2eb4f9019c2628ff5899744469c26caf793636f30ddb76601751aee968,false,8bfc3b314e4468d4e19c9d28b7bfd5b5532263105273b0fe80801f6146313993,b77d2b223e27038f978ab87a725859f6995f903056bdbd594ab04f0b2cbad517,9032be49a9cbcd1de6fee332f8f24ebf545c05e0175b98c564e7d1e69630ae20 
+81322b22c835efb26d78051f3a3840a9d01aa558c019ecfa26483b5c5535728c,true,61eacb7e9665e362ef492ef950cea58f8bc67434ab7ee5545139147adf395da4,0f600ef0c358cae938969f434c2ec0ce3be632fdf5246b7bb8ee3ff294036ecd,a7026b4c21fe225ecd775ae81249405c6f492882eb85f3f8e2232f11e515561e +826b86c5e8cb4173ff2d05c48e3537140c5e0f26f7866bbcd4e57616806e1be2,true,ae44dabd077d227c8d898930a7705a2b785c8849121282106c045bb58b66eb36,24b2c1b1e2a9ebe387df6dfb9fbde6c681e4eeb0a33bb1c3df3789087f56ffe3,b37a64ea97431b25cb271c4c8435f6dd97118b35da57168f3c3c269920f7bbc1 +18b5a7b973d4b263072e69515c5b6ed22191c3d6e851aaba872904672f8344ec,true,ce402af2fb93b6ef18cd406f7c437d3cbfb09141b7a02116b1cfbabbf75ad84a,c86bdb1709ef0f4a31a818843660f83338b9db77e262bb7c6546138e51c6046b,11fcd8e59c4e7f6050d3cd332337db794ae31260c159e409af3ed8f4d6523bf4 +26d10c56872b72bb76ae7c7b3f074afb3d4a364e5e3f8c661be9b4f5a522ea75,true,1c9782a8485c4ecb13904ec551a7f9300ecd687abfbe63c91c7fd583f84a7a4d,ae3f4ccd0dfee8b514f67db2e923714d324935b9ae9e488d088ebb79569d8cc4,8139a3ab728b0e765e4d90549ab8eed7e1048a83267eafa7442208a7f627558a +558838dfcfe94105c46a4ade4548e6c96271d33e6c752661356cc66024615bae,true,d5a38625be74177318072cf877f2427ce2327e9b58d2eb134d0ac52c9126572f,dead938f77007e3164b6eee4cd153433d03ca5d9ec64f41aa6b2d6a069edeeda,4a081a356361da429c564cf7ac8e217121bbe8c5ee5c9632bae0b7ddbe94f9d4 +f4a3f6a93a4827a59682fd8bf1a8e4fd9aaff01a337a86e1966c8fff0e746014,true,39a0aea2a8ac7f0524d63e395a25b98fc3844ed039f20b11058019dca2b3840f,6ff53243426ded506d22501ae0f989d9946b86a8bb2550d7ed6e90fdf41d0e7c,8784e728bf12f465ed20dc6f0e1d949a68e5795d4799536427a6f859547b7fd6 +1717020e1c4fca1b4926dba16671c0c04e4f19c621c646cb4525fa533b1c205c,false,b9a909767f3044608b4e314b149a729bef199f8311310e1ecd2072e5659b7194,7baf0ff4b980919cf545312f45234976f0b6c574aac5b772024f73248aad7538,99a18e1e4b039ef3777a8fdd0d9ffaccaf3b4523b6d26adacfe91cc5fcd9977e 
+de769062be27b2a4248dd5be315960c8d231738417ece670c2d6a1c52877b59e,true,cc6c2086718b21813513894546e85766d34c754e81fd6a19c12fc322ffb9b1c3,5a7da7500191c65a5f1fbb2a6122717edc70ca0469baf2bbbd6ca8255b93c077,8c0d32091dc687f1399c754a617d224742726bece848b50c35b4db5f0469ace7 +7c5549f36767e02ebf49a4616467199459aa6932dcc091f182f822185659559a,true,d8335e606128b0c621ff6cda99dc62babf4a4436c574c5c478c20122712727d0,0a7c673cccd6f7fd4ed1673f7d0f2cb08961faced123ca901b74581d5bdc8b25,16ac1eb2a39384716c7d490272d87e76c10665fdb331e1883435de175ce4460e +ecf8261ebda248dc7796f98987efe1b7be363a59037c9e61044490d08a077610,true,53def80fcdba01367c0ea36459b57409f59a771f57a8259b54f24785e5656b7d,90140870b3b1e84c9dcf7836eac0581b16fe0a40307619d267c6f871e1efce6a,c6d1836b66c1a722a377c7eb058995a0ef8711839c6d6a0cdd6ad1ff70f935a5 +21c0ef76ce0eae9391ceabfb08a861899db55ac4ccf010ed672599669c6938f2,false,8af5482cc015093f261d5b7ce87035dda41d8318b9960b52cca3e5f0d3f61808,f4d5338bcb57262e1034f01ed3858ca1e5d66a73f18588e72f3dc8c6a730be0c,7ba82c2820c95e3354d9a6ab4920ebcd7938ce19e25930fee58439246b0321b1 +05f3b66d6b0fe906137e60b4719083a2465106badedcdae3a4c91c46c5367340,false,e5c9e074e95c2896fa4093830e96e9cf159b8dcba2ead21f37237cf6e9a9aaa2,b3a0a50309b4ca23cd34363fd8df30e73ec4a275973986c2e11a53752eff0a3b,358a62056ff05f27185b9952d291c6346171937f6811cafbacddd82e17010f39 +fef0251cff7c5d1ba0514f1820a8265453365fd9f5bb8a92f955dc007a40e730,true,e35a0aff6e9060a39c15d276a1337f1948d0be0aef81fcd563a6783115b5283d,20a8efe83474253d70e5fd847df0cd26222cd39e9210687b68c0a23b73429108,2989fab4278b32f4f40dc02227ab30e10f62e15ab7aa7382da769b1d084e33df +1b7bb172baa2753ec9c3e81a7a9b4c6ef10f9ed7afcafa975395f095eca63a54,false,a98257203987d0c4d260d8feef841466977276612e268b69b5ce4191af161b29,ea177a20d6c1f73f9667090568f9197943037d6586f7e2d6b7b81756fc71df5f,844eff318ef4c6ee45f158c1946ff999e40ffac70883ab6d6b90995f246e69a2 
+5ee9b60a25753066d0ecc1155ca6afcc6b853ba558c9533c134a93b82e756856,true,9889460b95ca9545864a4a5194891b7d475362428d6d797532da10bf1fc92076,a7a96739abd8eceb6751afc98df68e29f7af16fbfda3d4710df9c35b6dcdb4d5,998326285c90a2ea2e1f6c6dac79530742645e3dd1b2b42a0733388a99cab81b +a102613781872f88a949d82cb5efcc2e0f437010a950d71b87929ecb480af3b3,false,e099080a55b9b29ccecbbb0d91dbe49defcc217efd1de0588e0836ce5970d327,319293b8660a3cea9879487645ddadda72a5c60079c9154bb0dbb8a0c9cda79e,4d567f1b1a1b304347cf7b129e4c7a05aa57e2bbb8ea335db9e33d05fab12e4d +1d4538180d06f37c43e8caa2d0d80aa7c5d701c8c3e31508704131427837f5cc,true,73afeeb46efc03d2b9f20fc271752528e52b8931287296a7e4367c96bccb32bd,59dc4b69d9ccf6f77715e47fb9bf454f1b90bbd05f1d2bbd07c7d6666f31c91f,ac59d735dfcdc3a0a4ce5a10f09dea8c6afd47de9c0308dc817e3789c8aee963 +e4c480af1b0e3487a331761f64eb3f020a2b8ffa25ad17e00f57aa7ec2c5e84d,true,1145e9f001c70d364e97fcdbc88a2a3d6aecdd975212923820f90a0b215f11f6,b802ac7ef21c8abaeae024c76e3fa70a2a82f73e0bb7c7fe76752ad1742af2e6,0a95876e30617e32ae25acd3af97c37dc075825f800def3f2bf3f68a268744e9 +3a7a83dd657dd6277bcfa957534f40d9b559039aad752066a8d7ed9a6d9c0ab5,false,f90a251ad2338b19cfee6a7965f6f5098136974abb99b3d24553fa6117384978,e422ed7567e5602731b3d980106d0546ef4a4da5eb7175d66a452df12d37bad2,b086bed71dfb6662cb10e2b4fb16a7c22394f488e822fc19697db6077f6caf6f +273e8560c2b1734e863a6542bded7a6fcbfb49a12770bd8866d4863dceea3ae9,false,3b7849a362e7b7ba8c8b8a0cd00df5180604987dbda6c03f37d9a09fdb27fb28,e6cdf4d767df0f411e970da8dda6acd3c2c34ce63908d8a6dbf3715daa0318e4,359a4a39fbdffc808161a48a3ffbe77fc6a03ff52324c22510a42e46c08a6f22 +9b4f8702991be9569b6c0b07a2173104d41325017b27d68fa5af91cdab164c4d,true,598323677db11ece050289f31881ee8caacb59376c7182f9055708b2a4673f84,7675adc1264b6758beb097a991f766f62796f78c1cfa58a4de3d81c36434d3ae,d5d8d610ffd85b04cbe1c73ff5becd5917c513d9625b001f51d486d0dadcefe3 
e1a686ba0169eb97379ebf9d22e073819450ee5ad5f049c8e93016e8d2ec1430,false,ffe461e6075865cde2704aa148fd29bcf0af245803f446cb6153244f25617993,46df6c25fa0344e662490c4da0bddca626644e67e66705840ef08aae35c343fa,e9a56d75acad4272ab0c49ee5919a4e86e6c5695ef065704c1e592d4e7b41a10
diff --git a/chains/tendermint_34/handlertendermint_34.go b/chains/tendermint_34/handlertendermint_34.go
new file mode 100644
index 0000000..78513d3
--- /dev/null
+++ b/chains/tendermint_34/handlertendermint_34.go
@@ -0,0 +1,1140 @@
+package tendermint_34
+
+import (
+	"bufio"
+	// bytes
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"time"
+	"encoding/hex"
+	"encoding/json"
+	b64 "encoding/base64"
+	"net/http"
+	"io/ioutil"
+
+	"github.com/gogo/protobuf/proto"
+	tmp2p "github.com/tendermint/tendermint/proto/p2p"
+	log "github.com/sirupsen/logrus"
+	"github.com/hashicorp/golang-lru"
+	"github.com/supragya/tendermint_connector/chains"
+	"github.com/supragya/tendermint_connector/chains/tendermint_34/conn"
+	cmn "github.com/supragya/tendermint_connector/chains/tendermint_34/libs/common"
+	flow "github.com/supragya/tendermint_connector/chains/tendermint_34/libs/flowrate"
+	marlinTypes "github.com/supragya/tendermint_connector/types"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+
+
+	// Protocols
+	"github.com/supragya/tendermint_connector/marlin"
+)
+
+// ServicedTMCore is a string associated with each TM core handler
+// to decipher which handler is to be attached.
+var ServicedTMCore chains.NodeType = chains.NodeType{Version: "0.34.0", Network: "tendermint", ProtocolVersionApp: "2", ProtocolVersionBlock: "9", ProtocolVersionP2p: "5"} + +// ---------------------- DATA CONNECT INTERFACE -------------------------------- + + +func RunDataConnect(peerAddr string, + marlinTo chan marlinTypes.MarlinMessage, + marlinFrom chan marlinTypes.MarlinMessage, + isConnectionOutgoing bool, + keyFile string, + listenPort int) { + log.Info("Starting Tendermint(.34) Core Handler") + + if keyFile != "" { + isKeyFileUsed = true + keyFileLocation = keyFile + } + + for { + handler, err := createTMHandler(peerAddr, "0.0.0.0:0", marlinTo, marlinFrom, isConnectionOutgoing, listenPort, true) + + if err != nil { + log.Error("Error encountered while creating TM Handler: ", err) + os.Exit(1) + } + + if isConnectionOutgoing { + err = handler.dialPeer() + } else { + err = handler.acceptPeer() + } + if err != nil { + log.Error("Base Connection establishment with peer unsuccessful: ", err) + goto REATTEMPT_CONNECTION + } + + err = handler.upgradeConnectionAndHandshake() + if err != nil { + log.Error("Error while upgrading connection and handshaking with peer: ", err) + goto REATTEMPT_CONNECTION + } + + handler.beginServicing() + + select { + case <-handler.signalConnError: + handler.signalShutSend <- struct{}{} + handler.signalShutRecv <- struct{}{} + handler.signalShutThroughput <- struct{}{} + goto REATTEMPT_CONNECTION + } + + REATTEMPT_CONNECTION: + handler.baseConnection.Close() + handler.secretConnection.Close() + log.Info("Error encountered with connection to the peer. 
Attempting reconnect post 1 second.") + time.Sleep(1 * time.Second) + } +} + +func (h *TendermintHandler) dialPeer() error { + var err error + h.baseConnection, err = net.DialTimeout("tcp", h.peerAddr, 2000*time.Millisecond) + if err != nil { + return err + } + + return nil +} + +func (h *TendermintHandler) acceptPeer() error { + log.Info("TMCore side listening for dials to ", + string(hex.EncodeToString(h.privateKey.PubKey().Address())), "@:", h.listenPort) + + listener, err := net.Listen("tcp", "0.0.0.0:"+strconv.Itoa(h.listenPort)) + if err != nil { + return err + } + + h.baseConnection, err = listener.Accept() + if err != nil { + return err + } + + return nil +} + +func (h *TendermintHandler) upgradeConnectionAndHandshake() error { + var err error + h.secretConnection, err = conn.MakeSecretConnection(h.baseConnection, h.privateKey) + if err != nil { + return err + } + + err = h.handshake() + if err != nil { + return err + } + + log.Info("Established connection with TM peer [" + + string(hex.EncodeToString(h.secretConnection.RemotePubKey().Address())) + + "] a.k.a. 
" + h.peerNodeInfo.Moniker) + return nil +} + +func (h *TendermintHandler) handshake() error { + var ( + errc = make(chan error, 2) + ourNodeInfo DefaultNodeInfo = DefaultNodeInfo{ + ProtocolVersion{App: 2, Block: 9, P2P: 5}, + string(hex.EncodeToString(h.privateKey.PubKey().Address())), + "tcp://127.0.0.1:20006", + "tendermint_34", + "0.34.0", + []byte{channelBc, channelCsSt, channelCsDc, channelCsVo, + channelCsVs, channelMm, channelEv}, + "marlin-tendermint-connector", + DefaultNodeInfoOther{"on", "tcp://0.0.0.0:26667"}, + } + ) + go func(errc chan<- error, c net.Conn) { + _, err := h.proto.MarshalText(c, ourNodeInfo) + if err != nil { + log.Error("Error encountered while sending handshake message") + } + errc <- err + }(errc, h.secretConnection) + go func(errc chan<- error, c net.Conn) { + _, err := h.proto.UnmarshalText(c,&h.peerNodeInfo) //to check + if err != nil { + log.Error("Error encountered while recieving handshake message") + } + errc <- err + }(errc, h.secretConnection) + + for i := 0; i < cap(errc); i++ { + err := <-errc + if err != nil { + log.Error("Encountered error in handshake with TM core: ", err) + return err + } + } + return nil +} + + +func (h *TendermintHandler) beginServicing() error { + // Register Messages + EncodePacket(h.protobuf) + EncodeConsensusMessages(h.protobuf) + DecodePacket(h.protobuf) + DecodeConsensusMessages(h.protobuf) + + // Create a P2P Connection + h.p2pConnection = P2PConnection{ + conn: h.secretConnection, + bufConnReader: bufio.NewReaderSize(h.secretConnection, 65535), + bufConnWriter: bufio.NewWriterSize(h.secretConnection, 65535), + sendMonitor: flow.New(0, 0), + recvMonitor: flow.New(0, 0), + send: make(chan struct{}, 1), + pong: make(chan struct{}, 1), + doneSendRoutine: make(chan struct{}, 1), + quitSendRoutine: make(chan struct{}, 1), + quitRecvRoutine: make(chan struct{}, 1), + flushTimer: cmn.NewThrottleTimer("flush", 100*time.Millisecond), + pingTimer: time.NewTicker(30 * time.Second), + pongTimeoutCh: 
make(chan bool, 1), + } + + // Start P2P Send and recieve routines + Status messages for message throughput + go h.sendRoutine() + go h.recvRoutine() + go h.throughput.presentThroughput(5, h.signalShutThroughput) + + // Allow tenderment.34 version messages from marlin Relay + marlin.AllowServicedChainMessages(h.servicedChainId) + return nil +} + +func (h *TendermintHandler) sendRoutine() { + log.Info("TMCore <- Connector Routine Started") + + for { + SELECTION: + select { + + case <-h.p2pConnection.pingTimer.C: // Send PING messages to TMCore + _n, err := h.protobuf.EncodePacket(h.p2pConnection.bufConnWriter, PacketPing{}) + if err != nil { + break SELECTION + } + h.p2pConnection.sendMonitor.Update(int(_n)) + h.p2pConnection.pongTimer = time.AfterFunc(60*time.Second, func() { + select { + case h.p2pConnection.pongTimeoutCh <- true: + default: + } + }) + + err = h.p2pConnection.bufConnWriter.Flush() + if err != nil { + log.Error("Cannot flush buffer PingTimer: ", err) + h.signalConnError <- struct{}{} + } + + case <-h.p2pConnection.pong: // Send PONG messages to TMCore + _n, err := h.protobuf.EncodePacket(h.p2pConnection.bufConnWriter, PacketPong{}) + if err != nil { + log.Error("Cannot send Pong message: ", err) + break SELECTION + } + h.p2pConnection.sendMonitor.Update(int(_n)) + err = h.p2pConnection.bufConnWriter.Flush() + if err != nil { + log.Error("Cannot flush buffer: ", err) + h.signalConnError <- struct{}{} + } + + case timeout := <-h.p2pConnection.pongTimeoutCh: // Check if PONG messages are received in time + if timeout { + log.Error("Pong timeout, TM Core did not reply in time!") + h.p2pConnection.stopPongTimer() + h.signalConnError <- struct{}{} + } else { + h.p2pConnection.stopPongTimer() + } + + case <-h.signalShutSend: // Signal to Shut down sendRoutine + log.Info("node <- Connector Routine shutdown") + h.p2pConnection.stopPongTimer() + close(h.p2pConnection.doneSendRoutine) + return + + case marlinMsg := <-h.marlinFrom: // Actual message packets 
from Marlin Relay (encoded in Marlin Tendermint Data Transfer Protocol v1) + switch marlinMsg.Channel { + case channelCsSt: + msg, err:= h.decodeConsensusMsgFromChannelBuffer(marlinMsg.Packets) + if err != nil { + log.Debug("Cannot decode message recieved from marlin to a valid Consensus Message: ", err) + } else { + switch msg.(type) { + case *NewRoundStepMessage: + for _, pkt := range marlinMsg.Packets { + _n, err := h.protobuf.EncodePacket( + h.p2pConnection.bufConnWriter, + PacketMsg{ + ChannelID: byte(pkt.ChannelID), + EOF: byte(pkt.EOF), + Bytes: pkt.Bytes, + }) + if err != nil { + log.Error("Error occurred in sending data to TMCore: ", err) + h.signalConnError <- struct{}{} + } + h.p2pConnection.sendMonitor.Update(int(_n)) + err = h.p2pConnection.bufConnWriter.Flush() + if err != nil { + log.Error("Cannot flush buffer: ", err) + h.signalConnError <- struct{}{} + } + } + h.throughput.putInfo("to", "+CsStNRS", uint32(len(marlinMsg.Packets))) + default: + h.throughput.putInfo("to", "-CsStUNK", uint32(len(marlinMsg.Packets))) + } + } + + case channelCsVo: + msg, err:= h.decodeConsensusMsgFromChannelBuffer(marlinMsg.Packets) + if err != nil { + log.Debug("Cannot decode message recieved from marlin to a valid Consensus Message: ", err) + } else { + switch msg.(type) { + case *VoteMessage: + for _, pkt := range marlinMsg.Packets { + _n, err := h.protobuf.EncodePacket( + h.p2pConnection.bufConnWriter, + PacketMsg{ + ChannelID: byte(pkt.ChannelID), + EOF: byte(pkt.EOF), + Bytes: pkt.Bytes, + }) + if err != nil { + log.Error("Error occurred in sending data to TMCore: ", err) + h.signalConnError <- struct{}{} + } + h.p2pConnection.sendMonitor.Update(int(_n)) + err = h.p2pConnection.bufConnWriter.Flush() + if err != nil { + log.Error("Cannot flush buffer: ", err) + h.signalConnError <- struct{}{} + } + } + h.throughput.putInfo("to", "+CsVoVOT", uint32(len(marlinMsg.Packets))) + default: + h.throughput.putInfo("to", "-CsVoUNK", uint32(len(marlinMsg.Packets))) + } + } + + 
case channelCsDc: + msg, err:= h.decodeConsensusMsgFromChannelBuffer(marlinMsg.Packets) + if err != nil { + log.Debug("Cannot decode message recieved from marlin to a valid Consensus Message: ", err) + } else { + switch msg.(type) { + case *ProposalMessage: + for _, pkt := range marlinMsg.Packets { + _n, err := h.protobuf.EncodePacket( + h.p2pConnection.bufConnWriter, + PacketMsg{ + ChannelID: byte(pkt.ChannelID), + EOF: byte(pkt.EOF), + Bytes: pkt.Bytes, + }) + if err != nil { + log.Error("Error occurred in sending data to TMCore: ", err) + h.signalConnError <- struct{}{} + } + h.p2pConnection.sendMonitor.Update(int(_n)) + err = h.p2pConnection.bufConnWriter.Flush() + if err != nil { + log.Error("Cannot flush buffer: ", err) + h.signalConnError <- struct{}{} + } + } + h.throughput.putInfo("to", "+CsDcPRP", uint32(len(marlinMsg.Packets))) + case *ProposalPOLMessage: + // Not serviced + case *BlockPartMessage: + for _, pkt := range marlinMsg.Packets { + _n, err := h.protobuf.EncodePacket( + h.p2pConnection.bufConnWriter, + PacketMsg{ + ChannelID: byte(pkt.ChannelID), + EOF: byte(pkt.EOF), + Bytes: pkt.Bytes, + }) + if err != nil { + log.Error("Error occurred in sending data to TMCore: ", err) + h.signalConnError <- struct{}{} + } + h.p2pConnection.sendMonitor.Update(int(_n)) + err = h.p2pConnection.bufConnWriter.Flush() + if err != nil { + log.Error("Cannot flush buffer: ", err) + h.signalConnError <- struct{}{} + } + } + h.throughput.putInfo("to", "+CsDcBPM", uint32(len(marlinMsg.Packets))) + default: + h.throughput.putInfo("to", "-CsDcUNK", uint32(len(marlinMsg.Packets))) + } + } + default: + h.throughput.putInfo("to", "-UnkUNK", uint32(len(marlinMsg.Packets))) + log.Debug("TMCore <- connector Not servicing undecipherable channel ", marlinMsg.Channel) + } + } + } +} + +func (h *TendermintHandler) recvRoutine() { + log.Info("TMCore -> Connector Routine Started") + +FOR_LOOP: + for { + select { + case <-h.signalShutRecv: + log.Info("TMCore -> Connector Routine 
shutdown") + break FOR_LOOP + default: + } + h.p2pConnection.recvMonitor.Limit(20000, 5120000, true) + + /* + Peek into bufConnReader for debugging + if numBytes := c.bufConnReader.Buffered(); numBytes > 0 { + bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100)) + if err == nil { + // return + } else { + log.Debug("Error peeking connection buffer ", "err ", err) + // return nil + } + log.Info("Peek connection buffer ", "numBytes ", numBytes, " bz ", bz) + } + */ + + // Read packet type + var packet Packet + _n, err := h.protobuf.DecodePacket( + h.p2pConnection.bufConnReader, + &packet, + int64(20000)) + + h.p2pConnection.recvMonitor.Update(int(_n)) + + // Unmarshalling test + if err != nil { + if err == io.EOF { + log.Error("TMCore -> Connector Connection is closed (likely by the other side)") + } else { + log.Error("TMCore -> Connector Connection failed (reading byte): ", err) + } + h.signalConnError <- struct{}{} + break FOR_LOOP + } + + // Read more depending on packet type. + switch pkt := packet.(type) { + case PacketPing: // Received PING messages from TMCore + select { + case h.p2pConnection.pong <- PacketPong{}: + default: + } + + case PacketPong: // Received PONG messages from TMCore + select { + case h.p2pConnection.pongTimeoutCh <- false: + default: + } + + case PacketMsg: // Actual message packets from TMCore + switch pkt.ChannelID { + case channelBc: + h.throughput.putInfo("from", "=BcMSG", 1) + log.Debug("TMCore -> Connector Blockhain is not serviced") + case channelCsSt: + h.channelBuffer[channelCsSt] = append(h.channelBuffer[channelCsSt], + marlinTypes.PacketMsg{ + ChannelID: uint32(pkt.ChannelID), + EOF: uint32(pkt.EOF), + Bytes: pkt.Bytes, + }) + + if pkt.EOF == byte(0x01) { + msg, err := h.decodeConsensusMsgFromChannelBuffer(h.channelBuffer[channelCsSt]) + if err != nil { + log.Error("Cannot decode message recieved from TMCore to a valid Consensus Message: ", err) + } else { + message := marlinTypes.MarlinMessage{ + ChainID: 
h.servicedChainId, + Channel: channelCsSt, + Packets: h.channelBuffer[channelCsSt], + } + + switch msg.(type) { + // Only NRS is sent forward + case *NewRoundStepMessage: + select { + case h.marlinTo <- message: + default: + log.Warning("Too many messages in channel marlinTo. Dropping oldest messages") + _ = <-h.marlinTo + h.marlinTo <- message + } + select { + case h.marlinFrom <- message: + default: + log.Warning("Too many messages in channel marlinFrom. Dropping oldest messages") + _ = <-h.marlinFrom + h.marlinFrom <- message + } + h.throughput.putInfo("from", "+CsStNRS", uint32(len(h.channelBuffer[channelCsSt]))) + case *NewValidBlockMessage: + // h.throughput.putInfo("from", "=CsStNVB", uint32(len(h.channelBuffer[channelCsSt]))) + case *HasVoteMessage: + // h.throughput.putInfo("from", "=CsStHVM", uint32(len(h.channelBuffer[channelCsSt]))) + case *VoteSetMaj23Message: + // h.throughput.putInfo("from", "=CsStM23", uint32(len(h.channelBuffer[channelCsSt]))) + default: + h.throughput.putInfo("from", "-CsStUNK", uint32(len(h.channelBuffer[channelCsSt]))) + } + } + h.channelBuffer[channelCsSt] = h.channelBuffer[channelCsSt][:0] + } + case channelCsDc: + h.channelBuffer[channelCsDc] = append(h.channelBuffer[channelCsDc], + marlinTypes.PacketMsg{ + ChannelID: uint32(pkt.ChannelID), + EOF: uint32(pkt.EOF), + Bytes: pkt.Bytes, + }) + if pkt.EOF == byte(0x01) { + msg, err := h.decodeConsensusMsgFromChannelBuffer(h.channelBuffer[channelCsDc]) + if err != nil { + log.Error("Cannot decode message recieved from TMCore to a valid Consensus Message: ", err) + } else { + message := marlinTypes.MarlinMessage{ + ChainID: h.servicedChainId, + Channel: channelCsDc, + Packets: h.channelBuffer[channelCsDc], + } + + switch msg.(type) { + case *ProposalMessage: + select { + case h.marlinTo <- message: + default: + log.Warning("Too many messages in channel marlinTo. 
Dropping oldest messages") + _ = <-h.marlinTo + h.marlinTo <- message + } + h.throughput.putInfo("from", "+CsDcPRP", uint32(len(h.channelBuffer[channelCsDc]))) + case *ProposalPOLMessage: + // Not serviced + case *BlockPartMessage: + select { + case h.marlinTo <- message: + default: + log.Warning("Too many messages in channel marlinTo. Dropping oldest messages") + _ = <-h.marlinTo + h.marlinTo <- message + } + h.throughput.putInfo("from", "+CsDcBPM", uint32(len(h.channelBuffer[channelCsDc]))) + default: + h.throughput.putInfo("from", "-CsDcMSG", uint32(len(h.channelBuffer[channelCsDc]))) + } + } + h.channelBuffer[channelCsDc] = h.channelBuffer[channelCsDc][:0] + + } + case channelCsVo: + h.channelBuffer[channelCsVo] = append(h.channelBuffer[channelCsVo], + marlinTypes.PacketMsg{ + ChannelID: uint32(pkt.ChannelID), + EOF: uint32(pkt.EOF), + Bytes: pkt.Bytes, + }) + if pkt.EOF == byte(0x01) { + msg, err := h.decodeConsensusMsgFromChannelBuffer(h.channelBuffer[channelCsVo]) + if err != nil { + log.Error("Cannot decode message recieved from TMCore to a valid Consensus Message: ", err) + } else { + message := marlinTypes.MarlinMessage{ + ChainID: h.servicedChainId, + Channel: channelCsVo, + Packets: h.channelBuffer[channelCsVo], + } + + switch msg.(type) { + case *VoteMessage: + select { + case h.marlinTo <- message: + default: + log.Warning("Too many messages in channel marlinTo. 
Dropping oldest messages") + _ = <-h.marlinTo + h.marlinTo <- message + } + h.throughput.putInfo("from", "+CsVoVOT", uint32(len(h.channelBuffer[channelCsVo]))) + default: + h.throughput.putInfo("from", "-CsVoVOT", uint32(len(h.channelBuffer[channelCsVo]))) + } + } + h.channelBuffer[channelCsVo] = h.channelBuffer[channelCsVo][:0] + } + case channelCsVs: + h.throughput.putInfo("from", "=CsVsVSB", 1) + log.Debug("TMCore -> Connector Consensensus Vote Set Bits Channel is not serviced") + case channelMm: + h.throughput.putInfo("from", "=MmMSG", 1) + log.Debug("TMCore -> Connector Mempool Channel is not serviced") + case channelEv: + h.throughput.putInfo("from", "=EvMSG", 1) + log.Debug("TMCore -> Connector Evidence Channel is not serviced") + default: + h.throughput.putInfo("from", "=UnkUNK", 1) + log.Warning("TMCore -> Connector Unknown ChannelID Message recieved: ", pkt.ChannelID) + } + + default: + log.Error("TMCore -> Connector Unknown message type ", reflect.TypeOf(packet)) + log.Error("TMCore -> Connector Connection failed: ", err) + h.signalConnError <- struct{}{} + break FOR_LOOP + } + } + + // Cleanup + close(h.p2pConnection.pong) + for range h.p2pConnection.pong { + // Drain + } +} + + + + + +func (h *TendermintHandler) decodeConsensusMsgFromChannelBuffer(chanbuf []marlinTypes.PacketMsg) (ConsensusMessage, error) { + var databuf []byte + var msg ConsensusMessage + var err error + for _, pkt := range chanbuf { + databuf = append(databuf, pkt.Bytes...) + } + if len(databuf) > 1048576 { + return msg, errors.New("Message is larger than 1MB. 
Cannot decode")
+	}
+	err = h.protobuf.DecodePacket(databuf, &msg)
+	return msg, err
+}
+
+func (c *P2PConnection) stopPongTimer() {
+	if c.pongTimer != nil {
+		_ = c.pongTimer.Stop()
+		c.pongTimer = nil
+	}
+}
+
+
+// ---------------------- SPAM FILTER INTERFACE --------------------------------
+
+
+// RunSpamFilter serves as the entry point for a TM Core handler when serving as a spamfilter
+func RunSpamFilter(rpcAddr string,
+	marlinTo chan marlinTypes.MarlinMessage,
+	marlinFrom chan marlinTypes.MarlinMessage) {
+	log.Info("Starting Tendermint(.34) SpamFilter - 0.16.3-d83fc038-2-mainnet")
+
+	handler, err := createTMHandler("0.0.0.0:0", rpcAddr, marlinTo, marlinFrom, false, 0, false)
+	if err != nil {
+		log.Error("Error encountered while creating TM Handler: ", err)
+		os.Exit(1)
+	}
+
+	marlin.AllowServicedChainMessages(handler.servicedChainId)
+
+	EncodePacket(handler.protobuf)
+	EncodeConsensusMessages(handler.codec)
+	DecodePacket(handler.protobuf)
+	DecodeConsensusMessages(handler.codec)
+
+	coreCount := runtime.NumCPU()
+	multiple := 2
+	log.Info("Runtime found number of CPUs on machine to be ", coreCount, ". Hence, running ", multiple*coreCount, " spamfilter handlers.")
+
+	for i := 0; i < multiple*coreCount; i++ {
+		go handler.beginServicingSpamFilter(i)
+	}
+
+	handler.throughput.presentThroughput(5, handler.signalShutThroughput)
+}
+
+func (h *TendermintHandler) beginServicingSpamFilter(id int) {
+	log.Info("Running TM side spam filter handler ", id)
+	// Register Messages
+
+	// TODO - SpamFilter never has to consult RPC server currently - since only CsSt+ is supported, write for that.
v0.2 prerelease + + for marlinMsg := range h.marlinFrom { + switch marlinMsg.Channel { + case channelBc: + h.throughput.putInfo("spam", "-CsBc", 1) + log.Debug("TMCore <-> Marlin Blockhain is not serviced") + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + case channelCsSt: + msg, err := h.decodeConsensusMsgFromChannelBuffer(marlinMsg.Packets) + if err != nil { + h.throughput.putInfo("spam", "-CsStUNK", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } else { + switch msg.(type) { + case *NewRoundStepMessage: + h.throughput.putInfo("spam", "+CsStNRS", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, true) + default: + h.throughput.putInfo("spam", "-CsStUNK", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } + } + case channelCsVo: + msg, err := h.decodeConsensusMsgFromChannelBuffer(marlinMsg.Packets) + if err != nil { + h.throughput.putInfo("spam", "-CsVoUNK", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } else { + switch msg.(type) { + case *VoteMessage: + if h.thoroughMessageCheck(msg) { + h.marlinTo <- h.spamVerdictMessage(marlinMsg, true) + h.throughput.putInfo("spam", "+CsVoVOT", uint32(len(marlinMsg.Packets))) + } else { + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + h.throughput.putInfo("spam", "-CsVoVOT", uint32(len(marlinMsg.Packets))) + } + default: + h.throughput.putInfo("spam", "-CsVoUNK", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } + } + case channelCsDc: + msg, err := h.decodeConsensusMsgFromChannelBuffer(marlinMsg.Packets) + if err != nil { + h.throughput.putInfo("spam", "-CsDcUNK", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } else { + switch msg.(type) { + case *ProposalMessage: + if h.thoroughMessageCheck(msg) { + h.marlinTo <- h.spamVerdictMessage(marlinMsg, true) + 
h.throughput.putInfo("spam", "+CsDcPRO", uint32(len(marlinMsg.Packets))) + } else { + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + h.throughput.putInfo("spam", "-CsDcPRO", uint32(len(marlinMsg.Packets))) + } + case *BlockPartMessage: + if h.thoroughMessageCheck(msg) { + h.marlinTo <- h.spamVerdictMessage(marlinMsg, true) + h.throughput.putInfo("spam", "+CsDcBPM", uint32(len(marlinMsg.Packets))) + } else { + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + h.throughput.putInfo("spam", "-CsDcBPM", uint32(len(marlinMsg.Packets))) + } + default: + h.throughput.putInfo("spam", "-CsVoUNK", uint32(len(marlinMsg.Packets))) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } + } + case channelCsVs: + h.throughput.putInfo("spam", "-CsVs", 1) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + log.Debug("TMCore <-> Marlin Consensensus Vote Set Bits Channel is not serviced") + case channelMm: + h.throughput.putInfo("spam", "-CsMm", 1) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + log.Debug("TMCore <-> Marlin Mempool Channel is not serviced") + case channelEv: + h.throughput.putInfo("spam", "-CsEv", 1) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + log.Debug("TMCore <-> MarlinEvidence Channel is not serviced") + default: + h.throughput.putInfo("spam", "-UnkUNK", 1) + h.marlinTo <- h.spamVerdictMessage(marlinMsg, false) + } + } +} + +func (h *TendermintHandler) thoroughMessageCheck(msg ConsensusMessage) bool { + switch msg.(type) { + case *VoteMessage: + if validator, ok := h.getValidators(msg.(*VoteMessage).Vote.Height); ok { + vidx := msg.(*VoteMessage).Vote.ValidatorIndex + vaddr := msg.(*VoteMessage).Vote.ValidatorAddress.String() + if vidx >= len(validator) || vaddr != validator[vidx].Address || + !validator[vidx].PublicKey.VerifyBytes(msg.(*VoteMessage).Vote.SignBytes("tendermint_.34", h.codec), msg.(*VoteMessage).Vote.Signature) { + return false + } + return true + } + return false + case *BlockPartMessage: + // Cache hash 
verification, needs Proposal message support + return false + case *ProposalMessage: + // if _, ok := h.getValidators(msg.(*ProposalMessage).Proposal.Height); ok { + // // Check signature, add to map so that BPM messages can be verified + // return true + // } + return false + default: + return false + } +} + +// to do +func (vote *Vote) SignBytes(chainID string, pb proto) []byte { + bz, err := proto.MarshalText(CanonicalizeVote(chainID, vote)) + if err != nil { + panic(err) + } + return bz +} + +func (h *TendermintHandler) getValidators(height int64) ([]Validator, bool) { + if height+10 < h.maxValidHeight { + // Don't service messages too old + return []Validator{}, false + } else if h.validatorCache.Contains(height) { + value, ok := h.validatorCache.Get(height) + return value.([]Validator), ok + } else { + // log.Info("Asked about height: ", height) + response, err := http.Get("http://"+h.rpcAddr+"/validators?height="+strconv.Itoa((int)(height))) + if err != nil { + log.Error("Error while sending request to get validators at height: ", height, " err: ", err) + return []Validator{}, false + } else { + // Close the body only after the error check: deferring before it + // dereferences a nil response when http.Get fails. + defer response.Body.Close() + bodyBytes, err := ioutil.ReadAll(response.Body) + if err != nil { + log.Error("Error while parsing request to get validators at height: ", height, " err: ", err) + return []Validator{}, false + } + var jsonResult map[string]interface{} + json.Unmarshal(bodyBytes, &jsonResult) + // verify interface for errors + if _, errorFieldFound := jsonResult["error"]; errorFieldFound { + return []Validator{}, false + } + validatorInfo := jsonResult["result"].(map[string]interface{})["validators"].([]interface{}) + + var validatorSet []Validator + for _, v := range(validatorInfo) { + if v.(map[string]interface{})["pub_key"].(map[string]interface{})["type"] != "tendermint/PubKeyEd25519" { + log.Error("Not all keys of validators are tendermint/PubKeyEd25519. 
Cannot continue with this validator set from TMCore") + return []Validator{}, false + } + decodedSlice, err := b64.StdEncoding.DecodeString(v.(map[string]interface{})["pub_key"].(map[string]interface{})["value"].(string)) + if err != nil { + return []Validator{}, false + } + var decodedArray [32]byte + copy(decodedArray[:], decodedSlice[:32]) + validatorSet = append(validatorSet, + Validator{ + PublicKey: ed25519.PubKeyEd25519(decodedArray), + Address: v.(map[string]interface{})["address"].(string), + }) + } + h.validatorCache.Add(height, validatorSet) + + h.maxValidHeight = height + return validatorSet, true + } + } +} + +func (h *TendermintHandler) spamVerdictMessage(msg marlinTypes.MarlinMessage, allow bool) marlinTypes.MarlinMessage { + if allow { + return marlinTypes.MarlinMessage{ + ChainID: h.servicedChainId, + Channel: byte(0x01), + PacketId: msg.PacketId, + } + } else { + return marlinTypes.MarlinMessage{ + ChainID: h.servicedChainId, + Channel: byte(0x00), + PacketId: msg.PacketId, + } + } +} + +// ---------------------- KEY GENERATION INTERFACE ----------------------------- + +var ServicedKeyFile string = "tendermint_.34" +var isKeyFileUsed, memoized bool +var keyFileLocation string +var privateKey ed25519.PrivKeyEd25519 + +func GenerateKeyFile(fileLocation string) { + log.Info("Generating KeyPair for tendermint_.34-") + + privateKey := ed25519.GenPrivKey() + publicKey := privateKey.PubKey() + + key := keyData{ + Chain: "tendermint_.34", + IdString: string(hex.EncodeToString(publicKey.Address())), + PrivateKeyString: string(hex.EncodeToString(privateKey[:])), + PublicKeyString: string(hex.EncodeToString(publicKey.Bytes())), + PrivateKey: privateKey, + PublicKey: publicKey.(ed25519.PubKeyEd25519), + } + + log.Info("ID for node after generating KeyPair: ", key.IdString) + + encodedJson, err := json.MarshalIndent(&key, "", " ") + if err != nil { + log.Error("Error generating KeyFile: ", err) + } + err = ioutil.WriteFile(fileLocation, encodedJson, 0644) + if
err != nil { + log.Error("Error generating KeyFile: ", err) + } + + log.Info("Successfully written keyfile ", fileLocation) +} + +func VerifyKeyFile(fileLocation string) (bool, error) { + log.Info("Accessing disk to extract info from KeyFile: ", fileLocation) + jsonFile, err := os.Open(fileLocation) + // if we os.Open returns an error then handle it + if err != nil { + log.Error("Error accessing file KeyFile: ", fileLocation, " error: ", err, ". exiting application.") + os.Exit(1) + } + defer jsonFile.Close() + + byteValue, err := ioutil.ReadAll(jsonFile) + if err != nil { + log.Error("Error decoding KeyFile: ", fileLocation, " error: ", err, ". exiting application.") + os.Exit(1) + } + var key keyData + json.Unmarshal(byteValue, &key) + + // TODO Check these conditions, add more checks - v0.2 prerelease + if key.Chain == "tendermint_.34" && string(hex.EncodeToString(key.PrivateKey[:])) == key.PrivateKeyString { + log.Info("Integrity for KeyFile: ", fileLocation, " checked. Integrity OK.") + return true, nil + } else { + log.Error("Integrity for KeyFile: ", fileLocation, " checked. Integrity NOT OK.") + return false, nil + } +} + +func getPrivateKey() ed25519.PrivKeyEd25519 { + if !isKeyFileUsed { + return ed25519.GenPrivKey() + } else { + if !memoized { + valid, err := VerifyKeyFile(keyFileLocation) + if err != nil { + log.Error("Error verifying keyfile integrity: ", keyFileLocation) + os.Exit(1) + } else if !valid { + os.Exit(1) + } + log.Info("Accessing disk to extract info from KeyFile: ", keyFileLocation) + jsonFile, err := os.Open(keyFileLocation) + // if we os.Open returns an error then handle it + if err != nil { + log.Error("Error accessing file KeyFile: ", keyFileLocation, " error: ", err, ". exiting application.") + os.Exit(1) + } + defer jsonFile.Close() + + byteValue, err := ioutil.ReadAll(jsonFile) + if err != nil { + log.Error("Error decoding KeyFile: ", keyFileLocation, " error: ", err, ". 
exiting application.") + os.Exit(1) + } + var key keyData + json.Unmarshal(byteValue, &key) + log.Info("Connector assumes for all connections henceforth the ID: ", key.IdString) + privateKey = key.PrivateKey + memoized = true + } + return privateKey + } +} + +// ---------------------- COMMON UTILITIES --------------------------------- + + +func createTMHandler(peerAddr string, + rpcAddr string, + marlinTo chan marlinTypes.MarlinMessage, + marlinFrom chan marlinTypes.MarlinMessage, + isConnectionOutgoing bool, + listenPort int, + isDataConnect bool) (TendermintHandler, error) { + chainId, ok := marlinTypes.ServicedChains["tendermint_.34"] + if !ok { + return TendermintHandler{}, errors.New("Cannot find tendermint_.34 in list of serviced chains by marlin connector") + } + + privateKey := getPrivateKey() + + vCache, err := lru.New2Q(500) + if err != nil { + return TendermintHandler{}, err + } + + return TendermintHandler{ + servicedChainId: chainId, + listenPort: listenPort, + isConnectionOutgoing: isConnectionOutgoing, + peerAddr: peerAddr, + rpcAddr: rpcAddr, + privateKey: privateKey, + protobuf: proto, + validatorCache: vCache, + marlinTo: marlinTo, + marlinFrom: marlinFrom, + channelBuffer: make(map[byte][]marlinTypes.PacketMsg), + throughput: throughPutData{ + isDataConnect: isDataConnect, + toTMCore: make(map[string]uint32), + fromTMCore: make(map[string]uint32), + spam: make(map[string]uint32), + }, + signalConnError: make(chan struct{}, 1), + signalShutSend: make(chan struct{}, 1), + signalShutRecv: make(chan struct{}, 1), + signalShutThroughput: make(chan struct{}, 1), + }, nil +} + +func (t *throughPutData) putInfo(direction string, key string, count uint32) { + t.mu.Lock() + switch direction { + case "to": + t.toTMCore[key] = t.toTMCore[key] + count + case "from": + t.fromTMCore[key] = t.fromTMCore[key] + count + case "spam": + t.spam[key] = t.spam[key] + count + } + t.mu.Unlock() +} + +func (t *throughPutData) presentThroughput(sec time.Duration, 
shutdownCh chan struct{}) { + for { + time.Sleep(sec * time.Second) + + select { + case <-shutdownCh: + return + default: + } + t.mu.Lock() + if t.isDataConnect { + log.Info(fmt.Sprintf("[DataConnect stats] To TMCore %v\tFrom TMCore %v", t.toTMCore, t.fromTMCore)) + } else { + log.Info(fmt.Sprintf("[SpamFilter stats] Served %v", t.spam)) + } + t.toTMCore = make(map[string]uint32) + t.fromTMCore = make(map[string]uint32) + t.spam = make(map[string]uint32) + t.mu.Unlock() + } +} + + +// --- EXTRAS + + +// TimeFormat is used for generating the sigs +// const TimeFormat = time.RFC3339Nano + +type CanonicalBlockID struct { + Hash cmn.HexBytes + PartsHeader CanonicalPartSetHeader +} + +type CanonicalPartSetHeader struct { + Hash cmn.HexBytes + Total int +} + +type CanonicalProposal struct { + Type byte // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + POLRound int64 `binary:"fixed64"` + BlockID CanonicalBlockID + Timestamp time.Time + ChainID string +} + +type CanonicalVote struct { + Type byte // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + BlockID CanonicalBlockID + Timestamp time.Time + ChainID string +} + +//----------------------------------- +// Canonicalize the structs + +func CanonicalizeBlockID(blockID BlockID) CanonicalBlockID { + return CanonicalBlockID{ + Hash: blockID.Hash, + PartsHeader: CanonicalizePartSetHeader(blockID.PartsHeader), + } +} + +func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { + return CanonicalPartSetHeader{ + psh.Hash, + psh.Total, + } +} + +func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { + return CanonicalProposal{ + Type: byte(0x20), + Height: proposal.Height, + Round: int64(proposal.Round), // cast int->int64 to make proto3 encode it fixed64 (does not work for int) + POLRound: int64(proposal.POLRound), + BlockID: CanonicalizeBlockID(proposal.BlockID), + Timestamp: proposal.Timestamp, + 
ChainID: chainID, + } +} + +func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { + return CanonicalVote{ + Type: vote.Type, + Height: vote.Height, + Round: int64(vote.Round), // cast int->int64 to make proto3 encode it fixed64 (does not work for int) + Timestamp: vote.Timestamp, + BlockID: CanonicalizeBlockID(vote.BlockID), + ChainID: chainID, + } +} \ No newline at end of file diff --git a/chains/tendermint_34/libs/.editorconfig b/chains/tendermint_34/libs/.editorconfig new file mode 100755 index 0000000..82f7743 --- /dev/null +++ b/chains/tendermint_34/libs/.editorconfig @@ -0,0 +1,19 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[Makefile] +indent_style = tab + +[*.sh] +indent_style = tab + +[*.proto] +indent_style = space +indent_size = 2 diff --git a/chains/tendermint_34/libs/.gitignore b/chains/tendermint_34/libs/.gitignore new file mode 100755 index 0000000..a2ebfde --- /dev/null +++ b/chains/tendermint_34/libs/.gitignore @@ -0,0 +1,5 @@ +*.sw[opqr] +vendor +.glide + +pubsub/query/fuzz_test/output diff --git a/chains/tendermint_34/libs/CHANGELOG.md b/chains/tendermint_34/libs/CHANGELOG.md new file mode 100755 index 0000000..0f900c5 --- /dev/null +++ b/chains/tendermint_34/libs/CHANGELOG.md @@ -0,0 +1,438 @@ +# Changelog + +## 0.9.0 + +*June 24th, 2018* + +BREAKING: + - [events, pubsub] Removed - moved to github.com/tendermint/tendermint + - [merkle] Use 20-bytes of SHA256 instead of RIPEMD160. NOTE: this package is + moving to github.com/tendermint/go-crypto ! + - [common] Remove gogoproto from KVPair types + - [common] Error simplification, #220 + +FEATURES: + + - [db/remotedb] New DB type using an external CLevelDB process via + GRPC + - [autofile] logjack command for piping stdin to a rotating file + - [bech32] New package. 
NOTE: should move out of here - it's just two small + functions + - [common] ColoredBytes([]byte) string for printing mixed ascii and bytes + - [db] DebugDB uses ColoredBytes() + +## 0.8.4 + +*June 5, 2018* + +IMPROVEMENTS: + + - [autofile] Flush on Stop; Close() method to Flush and close file + +## 0.8.3 + +*May 21, 2018* + +FEATURES: + + - [common] ASCIITrim() + +## 0.8.2 (April 23rd, 2018) + +FEATURES: + + - [pubsub] TagMap, NewTagMap + - [merkle] SimpleProofsFromMap() + - [common] IsASCIIText() + - [common] PrefixEndBytes // e.g. increment or nil + - [common] BitArray.MarshalJSON/.UnmarshalJSON + - [common] BitArray uses 'x' not 'X' for String() and above. + - [db] DebugDB shows better colorized output + +BUG FIXES: + + - [common] Fix TestParallelAbort nondeterministic failure #201/#202 + - [db] PrefixDB Iterator/ReverseIterator fixes + - [db] DebugDB fixes + +## 0.8.1 (April 5th, 2018) + +FEATURES: + + - [common] Error.Error() includes cause + - [common] IsEmpty() for 0 length + +## 0.8.0 (April 4th, 2018) + +BREAKING: + + - [merkle] `PutVarint->PutUvarint` in encodeByteSlice + - [db] batch.WriteSync() + - [common] Refactored and fixed `Parallel` function + - [common] Refactored `Rand` functionality + - [common] Remove unused `Right/LeftPadString` functions + - [common] Remove StackError, introduce Error interface (to replace use of pkg/errors) + +FEATURES: + + - [db] NewPrefixDB for a DB with all keys prefixed + - [db] NewDebugDB prints everything during operation + - [common] SplitAndTrim func + - [common] rand.Float64(), rand.Int63n(n), rand.Int31n(n) and global equivalents + - [common] HexBytes Format() + +BUG FIXES: + + - [pubsub] Fix unsubscribing + - [cli] Return config errors + - [common] Fix WriteFileAtomic Windows bug + +## 0.7.1 (March 22, 2018) + +IMPROVEMENTS: + + - glide -> dep + +BUG FIXES: + + - [common] Fix panic in NewBitArray for negative bits + - [common] Fix and simplify WriteFileAtomic so it cleans up properly + +## 0.7.0 (February 20, 
2018) + +BREAKING: + + - [db] Major API upgrade. See `db/types.go`. + - [common] added `Quit() <-chan struct{}` to Service interface. + The returned channel is closed when service is stopped. + - [common] Remove HTTP functions + - [common] Heap.Push takes an `int`, new Heap.PushComparable takes the comparable. + - [logger] Removed. Use `log` + - [merkle] Major API updade - uses cmn.KVPairs. + - [cli] WriteDemoConfig -> WriteConfigValues + - [all] Remove go-wire dependency! + +FEATURES: + + - [db] New FSDB that uses the filesystem directly + - [common] HexBytes + - [common] KVPair and KI64Pair (protobuf based key-value pair objects) + +IMPROVEMENTS: + + - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan() + to CElement. These can be used instead of blocking `*Wait()` methods + if you need to be able to send quit signal and not block forever + - [common] IsHex handles 0x-prefix + +BUG FIXES: + + - [common] BitArray check for nil arguments + - [common] Fix memory leak in RepeatTimer + +## 0.6.0 (December 29, 2017) + +BREAKING: + - [cli] remove --root + - [pubsub] add String() method to Query interface + +IMPROVEMENTS: + - [common] use a thread-safe and well seeded non-crypto rng + +BUG FIXES + - [clist] fix misuse of wait group + - [common] introduce Ticker interface and logicalTicker for better testing of timers + +## 0.5.0 (December 5, 2017) + +BREAKING: + - [common] replace Service#Start, Service#Stop first return value (bool) with an + error (ErrAlreadyStarted, ErrAlreadyStopped) + - [common] replace Service#Reset first return value (bool) with an error + - [process] removed + +FEATURES: + - [common] IntInSlice and StringInSlice functions + - [pubsub/query] introduce `Condition` struct, expose `Operator`, and add `query.Conditions()` + +## 0.4.1 (November 27, 2017) + +FEATURES: + - [common] `Keys()` method on `CMap` + +IMPROVEMENTS: + - [log] complex types now encoded as "%+v" by default if `String()` method is undefined (previously resulted in 
error) + - [log] logger logs its own errors + +BUG FIXES: + - [common] fixed `Kill()` to build on Windows (Windows does not have `syscall.Kill`) + +## 0.4.0 (October 26, 2017) + +BREAKING: + - [common] GoPath is now a function + - [db] `DB` and `Iterator` interfaces have new methods to better support iteration + +FEATURES: + - [autofile] `Read([]byte)` and `Write([]byte)` methods on `Group` to support binary WAL + - [common] `Kill()` sends SIGTERM to the current process + +IMPROVEMENTS: + - comments and linting + +BUG FIXES: + - [events] fix allocation error prefixing cache with 1000 empty events + +## 0.3.2 (October 2, 2017) + +BUG FIXES: + +- [autofile] fix AutoFile.Sync() to open file if it's been closed +- [db] fix MemDb.Close() to not empty the database (ie. its just a noop) + + +## 0.3.1 (September 22, 2017) + +BUG FIXES: + +- [common] fix WriteFileAtomic to not use /tmp, which can be on another device + +## 0.3.0 (September 22, 2017) + +BREAKING CHANGES: + +- [log] logger functions no longer returns an error +- [common] NewBaseService takes the new logger +- [cli] RunCaptureWithArgs now captures stderr and stdout + - +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) + - -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (output string, err error) + +FEATURES: + +- [common] various common HTTP functionality +- [common] Date range parsing from string (ex. 
"2015-12-31:2017-12-31") +- [common] ProtocolAndAddress function +- [pubsub] New package for publish-subscribe with more advanced filtering + +BUG FIXES: + +- [common] fix atomicity of WriteFileAtomic by calling fsync +- [db] fix memDb iteration index out of range +- [autofile] fix Flush by calling fsync + +## 0.2.2 (June 16, 2017) + +FEATURES: + +- [common] IsHex and StripHex for handling `0x` prefixed hex strings +- [log] NewTracingLogger returns a logger that output error traces, ala `github.com/pkg/errors` + +IMPROVEMENTS: + +- [cli] Error handling for tests +- [cli] Support dashes in ENV variables + +BUG FIXES: + +- [flowrate] Fix non-deterministic test failures + +## 0.2.1 (June 2, 2017) + +FEATURES: + +- [cli] Log level parsing moved here from tendermint repo + +## 0.2.0 (May 18, 2017) + +BREAKING CHANGES: + +- [common] NewBaseService takes the new logger + + +FEATURES: + +- [cli] New library to standardize building command line tools +- [log] New logging library + +BUG FIXES: + +- [autofile] Close file before rotating + +## 0.1.0 (May 1, 2017) + +Initial release, combines what were previously independent repos: + +- go-autofile +- go-clist +- go-common +- go-db +- go-events +- go-flowrate +- go-logger +- go-merkle +- go-process + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/chains/tendermint_34/libs/README.md b/chains/tendermint_34/libs/README.md new file mode 100755 index 0000000..9ea618d --- /dev/null +++ b/chains/tendermint_34/libs/README.md @@ -0,0 +1,49 @@ +# TMLIBS + +This repo is a home for various small packages. + +## autofile + +Autofile is file access with automatic log rotation. 
A group of files is maintained and rotation happens +when the leading file gets too big. Provides a reader for reading from the file group. + +## cli + +CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger. + +## clist + +Clist provides a linked list that is safe for concurrent access by many readers. + +## common + +Common provides a hodgepodge of useful functions. + +## db + +DB provides a database interface and a number of implementations, including ones using an in-memory map, the filesystem directory structure, +an implementation of LevelDB in Go, and the official LevelDB in C. + +## events + +Events is a synchronous PubSub package. + +## flowrate + +Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method. + +## log + +Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. + +## merkle + +Merkle provides a simple static merkle tree and corresponding proofs. + +## process + +Process is a simple utility for spawning OS processes. + +## pubsub + +PubSub is an asynchronous PubSub package. diff --git a/chains/tendermint_34/libs/autofile/README.md b/chains/tendermint_34/libs/autofile/README.md new file mode 100755 index 0000000..2379920 --- /dev/null +++ b/chains/tendermint_34/libs/autofile/README.md @@ -0,0 +1 @@ +# go-autofile diff --git a/chains/tendermint_34/libs/autofile/autofile.go b/chains/tendermint_34/libs/autofile/autofile.go new file mode 100755 index 0000000..e428e26 --- /dev/null +++ b/chains/tendermint_34/libs/autofile/autofile.go @@ -0,0 +1,188 @@ +package autofile + +import ( + "os" + "os/signal" + "sync" + "syscall" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* AutoFile usage + +// Create/Append to ./autofile_test +af, err := OpenAutoFile("autofile_test") +if err != nil { + panic(err) +} + +// Stream of writes. 
+// During this time, the file may be moved e.g. by logRotate. +for i := 0; i < 60; i++ { + af.Write([]byte(Fmt("LOOP(%v)", i))) + time.Sleep(time.Second) +} + +// Close the AutoFile +err = af.Close() +if err != nil { + panic(err) +} +*/ + +const ( + autoFileClosePeriod = 1000 * time.Millisecond + autoFilePerms = os.FileMode(0600) +) + +// AutoFile automatically closes and re-opens file for writing. The file is +// automatically setup to close itself every 1s and upon receiving SIGHUP. +// +// This is useful for using a log file with the logrotate tool. +type AutoFile struct { + ID string + Path string + + closeTicker *time.Ticker + closeTickerStopc chan struct{} // closed when closeTicker is stopped + hupc chan os.Signal + + mtx sync.Mutex + file *os.File +} + +// OpenAutoFile creates an AutoFile in the path (with random ID). If there is +// an error, it will be of type *PathError or *ErrPermissionsChanged (if file's +// permissions got changed (should be 0600)). +func OpenAutoFile(path string) (*AutoFile, error) { + af := &AutoFile{ + ID: cmn.RandStr(12) + ":" + path, + Path: path, + closeTicker: time.NewTicker(autoFileClosePeriod), + closeTickerStopc: make(chan struct{}), + } + if err := af.openFile(); err != nil { + af.Close() + return nil, err + } + + // Close file on SIGHUP. + af.hupc = make(chan os.Signal, 1) + signal.Notify(af.hupc, syscall.SIGHUP) + go func() { + for range af.hupc { + af.closeFile() + } + }() + + go af.closeFileRoutine() + + return af, nil +} + +// Close shuts down the closing goroutine, SIGHUP handler and closes the +// AutoFile. 
+func (af *AutoFile) Close() error { + af.closeTicker.Stop() + close(af.closeTickerStopc) + if af.hupc != nil { + close(af.hupc) + } + return af.closeFile() +} + +func (af *AutoFile) closeFileRoutine() { + for { + select { + case <-af.closeTicker.C: + af.closeFile() + case <-af.closeTickerStopc: + return + } + } +} + +func (af *AutoFile) closeFile() (err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + file := af.file + if file == nil { + return nil + } + + af.file = nil + return file.Close() +} + +// Write writes len(b) bytes to the AutoFile. It returns the number of bytes +// written and an error, if any. Write returns a non-nil error when n != +// len(b). +// Opens AutoFile if needed. +func (af *AutoFile) Write(b []byte) (n int, err error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + if af.file == nil { + if err = af.openFile(); err != nil { + return + } + } + + n, err = af.file.Write(b) + return +} + +// Sync commits the current contents of the file to stable storage. Typically, +// this means flushing the file system's in-memory copy of recently written +// data to disk. +// Opens AutoFile if needed. +func (af *AutoFile) Sync() error { + af.mtx.Lock() + defer af.mtx.Unlock() + + if af.file == nil { + if err := af.openFile(); err != nil { + return err + } + } + return af.file.Sync() +} + +func (af *AutoFile) openFile() error { + file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, autoFilePerms) + if err != nil { + return err + } + // fileInfo, err := file.Stat() + // if err != nil { + // return err + // } + // if fileInfo.Mode() != autoFilePerms { + // return errors.NewErrPermissionsChanged(file.Name(), fileInfo.Mode(), autoFilePerms) + // } + af.file = file + return nil +} + +// Size returns the size of the AutoFile. It returns -1 and an error if fails +// get stats or open file. +// Opens AutoFile if needed. 
+func (af *AutoFile) Size() (int64, error) { + af.mtx.Lock() + defer af.mtx.Unlock() + + if af.file == nil { + if err := af.openFile(); err != nil { + return -1, err + } + } + + stat, err := af.file.Stat() + if err != nil { + return -1, err + } + return stat.Size(), nil +} diff --git a/chains/tendermint_34/libs/autofile/autofile_test.go b/chains/tendermint_34/libs/autofile/autofile_test.go new file mode 100755 index 0000000..d9c9030 --- /dev/null +++ b/chains/tendermint_34/libs/autofile/autofile_test.go @@ -0,0 +1,122 @@ +package autofile + +import ( + "io/ioutil" + "os" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestSIGHUP(t *testing.T) { + // First, create an AutoFile writing to a tempfile dir + file, err := ioutil.TempFile("", "sighup_test") + require.NoError(t, err) + err = file.Close() + require.NoError(t, err) + name := file.Name() + + // Here is the actual AutoFile + af, err := OpenAutoFile(name) + require.NoError(t, err) + + // Write to the file. + _, err = af.Write([]byte("Line 1\n")) + require.NoError(t, err) + _, err = af.Write([]byte("Line 2\n")) + require.NoError(t, err) + + // Move the file over + err = os.Rename(name, name+"_old") + require.NoError(t, err) + + // Send SIGHUP to self. + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + + // Wait a bit... signals are not handled synchronously. + time.Sleep(time.Millisecond * 10) + + // Write more to the file. 
+ _, err = af.Write([]byte("Line 3\n")) + require.NoError(t, err) + _, err = af.Write([]byte("Line 4\n")) + require.NoError(t, err) + err = af.Close() + require.NoError(t, err) + + // Both files should exist + if body := cmn.MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { + t.Errorf("Unexpected body %s", body) + } + if body := cmn.MustReadFile(name); string(body) != "Line 3\nLine 4\n" { + t.Errorf("Unexpected body %s", body) + } +} + +// // Manually modify file permissions, close, and reopen using autofile: +// // We expect the file permissions to be changed back to the intended perms. +// func TestOpenAutoFilePerms(t *testing.T) { +// file, err := ioutil.TempFile("", "permission_test") +// require.NoError(t, err) +// err = file.Close() +// require.NoError(t, err) +// name := file.Name() + +// // open and change permissions +// af, err := OpenAutoFile(name) +// require.NoError(t, err) +// err = af.file.Chmod(0755) +// require.NoError(t, err) +// err = af.Close() +// require.NoError(t, err) + +// // reopen and expect an ErrPermissionsChanged as Cause +// af, err = OpenAutoFile(name) +// require.Error(t, err) +// if e, ok := err.(*errors.ErrPermissionsChanged); ok { +// t.Logf("%v", e) +// } else { +// t.Errorf("unexpected error %v", e) +// } +// } + +func TestAutoFileSize(t *testing.T) { + // First, create an AutoFile writing to a tempfile dir + f, err := ioutil.TempFile("", "sighup_test") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Here is the actual AutoFile. + af, err := OpenAutoFile(f.Name()) + require.NoError(t, err) + + // 1. Empty file + size, err := af.Size() + require.Zero(t, size) + require.NoError(t, err) + + // 2. Not empty file + data := []byte("Maniac\n") + _, err = af.Write(data) + require.NoError(t, err) + size, err = af.Size() + require.EqualValues(t, len(data), size) + require.NoError(t, err) + + // 3. 
Not existing file + err = af.Close() + require.NoError(t, err) + err = os.Remove(f.Name()) + require.NoError(t, err) + size, err = af.Size() + require.EqualValues(t, 0, size, "Expected a new file to be empty") + require.NoError(t, err) + + // Cleanup + _ = os.Remove(f.Name()) +} diff --git a/chains/tendermint_34/libs/autofile/cmd/logjack.go b/chains/tendermint_34/libs/autofile/cmd/logjack.go new file mode 100755 index 0000000..ead3f83 --- /dev/null +++ b/chains/tendermint_34/libs/autofile/cmd/logjack.go @@ -0,0 +1,106 @@ +package main + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + + auto "github.com/tendermint/tendermint/libs/autofile" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const Version = "0.0.1" +const readBufferSize = 1024 // 1KB at a time + +// Parse command-line options +func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { + var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) + var chopSizeStr, limitSizeStr string + flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") + flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") + flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). 
Remove old files.") + flagSet.BoolVar(&version, "version", false, "Version") + flagSet.Parse(os.Args[1:]) + chopSize = parseBytesize(chopSizeStr) + limitSize = parseBytesize(limitSizeStr) + return +} + +func main() { + + // Read options + headPath, chopSize, limitSize, version := parseFlags() + if version { + fmt.Printf("logjack version %v\n", Version) + return + } + + // Open Group + group, err := auto.OpenGroup(headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) + if err != nil { + fmt.Printf("logjack couldn't create output file %v\n", headPath) + os.Exit(1) + } + + err = group.Start() + if err != nil { + fmt.Printf("logjack couldn't start with file %v\n", headPath) + os.Exit(1) + } + + go func() { + // Forever, read from stdin and write to AutoFile. + buf := make([]byte, readBufferSize) + for { + n, err := os.Stdin.Read(buf) + group.Write(buf[:n]) + group.Flush() + if err != nil { + group.Stop() + if err == io.EOF { + os.Exit(0) + } else { + fmt.Println("logjack errored") + os.Exit(1) + } + } + } + }() + + // Trap signal + cmn.TrapSignal(func() { + fmt.Println("logjack shutting down") + }) +} + +// parseBytesize converts a size string with optional K/M/G/T suffix to bytes. +func parseBytesize(chopSize string) int64 { + // Handle suffix multiplier (binary units: 1K = 1024 bytes) + var multiplier int64 = 1 + if strings.HasSuffix(chopSize, "T") { + multiplier = 1024 * 1024 * 1024 * 1024 + chopSize = chopSize[:len(chopSize)-1] + } + if strings.HasSuffix(chopSize, "G") { + multiplier = 1024 * 1024 * 1024 + chopSize = chopSize[:len(chopSize)-1] + } + if strings.HasSuffix(chopSize, "M") { + multiplier = 1024 * 1024 + chopSize = chopSize[:len(chopSize)-1] + } + if strings.HasSuffix(chopSize, "K") { + multiplier = 1024 + chopSize = chopSize[:len(chopSize)-1] + } + + // Parse the numeric part + chopSizeInt, err := strconv.Atoi(chopSize) + if err != nil { + panic(err) + } + + return int64(chopSizeInt) * multiplier +} diff --git a/chains/tendermint_34/libs/autofile/group.go b/chains/tendermint_34/libs/autofile/group.go new file mode 100755 index
0000000..aac911a --- /dev/null +++ b/chains/tendermint_34/libs/autofile/group.go @@ -0,0 +1,764 @@ +package autofile + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ( + defaultGroupCheckDuration = 5000 * time.Millisecond + defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB + defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB + maxFilesToRemove = 4 // needs to be greater than 1 +) + +/* +You can open a Group to keep restrictions on an AutoFile, like +the maximum size of each chunk, and/or the total amount of bytes +stored in the group. + +The first file to be written in the Group.Dir is the head file. + + Dir/ + - + +Once the Head file reaches the size limit, it will be rotated. + + Dir/ + - .000 // First rolled file + - // New head path, starts empty. + // The implicit index is 001. + +As more files are written, the index numbers grow... + + Dir/ + - .000 // First rolled file + - .001 // Second rolled file + - ... + - // New head path + +The Group can also be used to binary-search for some line, +assuming that marker lines are written occasionally. +*/ +type Group struct { + cmn.BaseService + + ID string + Head *AutoFile // The head AutoFile to write to + headBuf *bufio.Writer + Dir string // Directory that contains .Head + ticker *time.Ticker + mtx sync.Mutex + headSizeLimit int64 + totalSizeLimit int64 + groupCheckDuration time.Duration + minIndex int // Includes head + maxIndex int // Includes head, where Head will move to + + // close this when the processTicks routine is done. + // this ensures we can cleanup the dir after calling Stop + // and the routine won't be trying to access it anymore + doneProcessTicks chan struct{} +} + +// OpenGroup creates a new Group with head at headPath. It returns an error if +// it fails to open head file. 
+func OpenGroup(headPath string, groupOptions ...func(*Group)) (g *Group, err error) { + dir := path.Dir(headPath) + head, err := OpenAutoFile(headPath) + if err != nil { + return nil, err + } + + g = &Group{ + ID: "group:" + head.ID, + Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), + Dir: dir, + headSizeLimit: defaultHeadSizeLimit, + totalSizeLimit: defaultTotalSizeLimit, + groupCheckDuration: defaultGroupCheckDuration, + minIndex: 0, + maxIndex: 0, + doneProcessTicks: make(chan struct{}), + } + + for _, option := range groupOptions { + option(g) + } + + g.BaseService = *cmn.NewBaseService(nil, "Group", g) + + gInfo := g.readGroupInfo() + g.minIndex = gInfo.MinIndex + g.maxIndex = gInfo.MaxIndex + return +} + +// GroupCheckDuration allows you to overwrite default groupCheckDuration. +func GroupCheckDuration(duration time.Duration) func(*Group) { + return func(g *Group) { + g.groupCheckDuration = duration + } +} + +// GroupHeadSizeLimit allows you to overwrite default head size limit - 10MB. +func GroupHeadSizeLimit(limit int64) func(*Group) { + return func(g *Group) { + g.headSizeLimit = limit + } +} + +// GroupTotalSizeLimit allows you to overwrite default total size limit of the group - 1GB. +func GroupTotalSizeLimit(limit int64) func(*Group) { + return func(g *Group) { + g.totalSizeLimit = limit + } +} + +// OnStart implements Service by starting the goroutine that checks file and +// group limits. +func (g *Group) OnStart() error { + g.ticker = time.NewTicker(g.groupCheckDuration) + go g.processTicks() + return nil +} + +// OnStop implements Service by stopping the goroutine described above. +// NOTE: g.Head must be closed separately using Close. +func (g *Group) OnStop() { + g.ticker.Stop() + g.Flush() // flush any uncommitted data +} + +func (g *Group) Wait() { + // wait for processTicks routine to finish + <-g.doneProcessTicks +} + +// Close closes the head file. The group must be stopped by this moment. 
+func (g *Group) Close() { + g.Flush() // flush any uncommitted data + + g.mtx.Lock() + _ = g.Head.closeFile() + g.mtx.Unlock() +} + +// HeadSizeLimit returns the current head size limit. +func (g *Group) HeadSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headSizeLimit +} + +// TotalSizeLimit returns total size limit of the group. +func (g *Group) TotalSizeLimit() int64 { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.totalSizeLimit +} + +// MaxIndex returns index of the last file in the group. +func (g *Group) MaxIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.maxIndex +} + +// MinIndex returns index of the first file in the group. +func (g *Group) MinIndex() int { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.minIndex +} + +func (g *Group) Write(p []byte) (nn int, err error) { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.headBuf.Write(p) +} + +func (g *Group) WriteLine(line string) error { + g.mtx.Lock() + defer g.mtx.Unlock() + _, err := g.headBuf.Write([]byte(line + "\n")) + return err +} + +// Flush writes any buffered data to the underlying file and commits the +// current content of the file to stable storage. +func (g *Group) Flush() error { + g.mtx.Lock() + defer g.mtx.Unlock() + err := g.headBuf.Flush() + if err == nil { + err = g.Head.Sync() + } + return err +} + +func (g *Group) processTicks() { + defer close(g.doneProcessTicks) + for { + select { + case <-g.ticker.C: + g.checkHeadSizeLimit() + g.checkTotalSizeLimit() + case <-g.Quit(): + return + } + } +} + +// NOTE: this function is called manually in tests. 
+func (g *Group) checkHeadSizeLimit() { + limit := g.HeadSizeLimit() + if limit == 0 { + return + } + size, err := g.Head.Size() + if err != nil { + g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path, "err", err) + return + } + if size >= limit { + g.RotateFile() + } +} + +func (g *Group) checkTotalSizeLimit() { + limit := g.TotalSizeLimit() + if limit == 0 { + return + } + + gInfo := g.readGroupInfo() + totalSize := gInfo.TotalSize + for i := 0; i < maxFilesToRemove; i++ { + index := gInfo.MinIndex + i + if totalSize < limit { + return + } + if index == gInfo.MaxIndex { + // Special degenerate case, just do nothing. + g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path) + return + } + pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) + fInfo, err := os.Stat(pathToRemove) + if err != nil { + g.Logger.Error("Failed to fetch info for file", "file", pathToRemove) + continue + } + err = os.Remove(pathToRemove) + if err != nil { + g.Logger.Error("Failed to remove path", "path", pathToRemove) + return + } + totalSize -= fInfo.Size() + } +} + +// RotateFile causes group to close the current head and assign it some index. +// Note it does not create a new head. +func (g *Group) RotateFile() { + g.mtx.Lock() + defer g.mtx.Unlock() + + headPath := g.Head.Path + + if err := g.headBuf.Flush(); err != nil { + panic(err) + } + + if err := g.Head.Sync(); err != nil { + panic(err) + } + + if err := g.Head.closeFile(); err != nil { + panic(err) + } + + indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) + if err := os.Rename(headPath, indexPath); err != nil { + panic(err) + } + + g.maxIndex++ +} + +// NewReader returns a new group reader. +// CONTRACT: Caller must close the returned GroupReader. 
+func (g *Group) NewReader(index int) (*GroupReader, error) { + r := newGroupReader(g) + err := r.SetIndex(index) + if err != nil { + return nil, err + } + return r, nil +} + +// Returns -1 if line comes after, 0 if found, 1 if line comes before. +type SearchFunc func(line string) (int, error) + +// Searches for the right file in Group, then returns a GroupReader to start +// streaming lines. +// Returns true if an exact match was found, otherwise returns the next greater +// line that starts with prefix. +// CONTRACT: Caller must close the returned GroupReader +func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { + g.mtx.Lock() + minIndex, maxIndex := g.minIndex, g.maxIndex + g.mtx.Unlock() + // Now minIndex/maxIndex may change meanwhile, + // but it shouldn't be a big deal + // (maybe we'll want to limit scanUntil though) + + for { + curIndex := (minIndex + maxIndex + 1) / 2 + + // Base case, when there's only 1 choice left. + if minIndex == maxIndex { + r, err := g.NewReader(maxIndex) + if err != nil { + return nil, false, err + } + match, err := scanUntil(r, prefix, cmp) + if err != nil { + r.Close() + return nil, false, err + } + return r, match, err + } + + // Read starting roughly at the middle file, + // until we find line that has prefix. + r, err := g.NewReader(curIndex) + if err != nil { + return nil, false, err + } + foundIndex, line, err := scanNext(r, prefix) + r.Close() + if err != nil { + return nil, false, err + } + + // Compare this line to our search query. 
+ val, err := cmp(line) + if err != nil { + return nil, false, err + } + if val < 0 { + // Line will come later + minIndex = foundIndex + } else if val == 0 { + // Stroke of luck, found the line + r, err := g.NewReader(foundIndex) + if err != nil { + return nil, false, err + } + match, err := scanUntil(r, prefix, cmp) + if err != nil { + r.Close() + return nil, false, err + } + if !match { + panic("Expected match to be true") + } + return r, true, err + } else { + // We passed it + maxIndex = curIndex - 1 + } + } + +} + +// Scans and returns the first line that starts with 'prefix' +// Consumes line and returns it. +func scanNext(r *GroupReader, prefix string) (int, string, error) { + for { + line, err := r.ReadLine() + if err != nil { + return 0, "", err + } + if !strings.HasPrefix(line, prefix) { + continue + } + index := r.CurIndex() + return index, line, nil + } +} + +// Returns true iff an exact match was found. +// Pushes line, does not consume it. +func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { + for { + line, err := r.ReadLine() + if err != nil { + return false, err + } + if !strings.HasPrefix(line, prefix) { + continue + } + val, err := cmp(line) + if err != nil { + return false, err + } + if val < 0 { + continue + } else if val == 0 { + r.PushLine(line) + return true, nil + } else { + r.PushLine(line) + return false, nil + } + } +} + +// Searches backwards for the last line in Group with prefix. +// Scans each file forward until the end to find the last match. 
+func (g *Group) FindLast(prefix string) (match string, found bool, err error) { + g.mtx.Lock() + minIndex, maxIndex := g.minIndex, g.maxIndex + g.mtx.Unlock() + + r, err := g.NewReader(maxIndex) + if err != nil { + return "", false, err + } + defer r.Close() + + // Open files from the back and read +GROUP_LOOP: + for i := maxIndex; i >= minIndex; i-- { + err := r.SetIndex(i) + if err != nil { + return "", false, err + } + // Scan each line and test whether line matches + for { + line, err := r.ReadLine() + if err == io.EOF { + if found { + return match, found, nil + } + continue GROUP_LOOP + } else if err != nil { + return "", false, err + } + if strings.HasPrefix(line, prefix) { + match = line + found = true + } + if r.CurIndex() > i { + if found { + return match, found, nil + } + continue GROUP_LOOP + } + } + } + + return +} + +// GroupInfo holds information about the group. +type GroupInfo struct { + MinIndex int // index of the first file in the group, including head + MaxIndex int // index of the last file in the group, including head + TotalSize int64 // total size of the group + HeadSize int64 // size of the head +} + +// Returns info after scanning all files in g.Head's dir. +func (g *Group) ReadGroupInfo() GroupInfo { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.readGroupInfo() +} + +// Index includes the head. 
+// CONTRACT: caller should have called g.mtx.Lock +func (g *Group) readGroupInfo() GroupInfo { + groupDir := filepath.Dir(g.Head.Path) + headBase := filepath.Base(g.Head.Path) + var minIndex, maxIndex int = -1, -1 + var totalSize, headSize int64 = 0, 0 + + dir, err := os.Open(groupDir) + if err != nil { + panic(err) + } + defer dir.Close() + fiz, err := dir.Readdir(0) + if err != nil { + panic(err) + } + + indexedFilePattern := regexp.MustCompile(`^.+\.([0-9]{3,})$`) + // For each file in the directory, filter by pattern + for _, fileInfo := range fiz { + if fileInfo.Name() == headBase { + fileSize := fileInfo.Size() + totalSize += fileSize + headSize = fileSize + continue + } else if strings.HasPrefix(fileInfo.Name(), headBase) { + fileSize := fileInfo.Size() + totalSize += fileSize + submatch := indexedFilePattern.FindSubmatch([]byte(fileInfo.Name())) + if len(submatch) != 0 { + // Matches + fileIndex, err := strconv.Atoi(string(submatch[1])) + if err != nil { + panic(err) + } + if maxIndex < fileIndex { + maxIndex = fileIndex + } + if minIndex == -1 || fileIndex < minIndex { + minIndex = fileIndex + } + } + } + } + + // Now account for the head. + if minIndex == -1 { + // If there were no numbered files, + // then the head is index 0. + minIndex, maxIndex = 0, 0 + } else { + // Otherwise, the head file is 1 greater + maxIndex++ + } + return GroupInfo{minIndex, maxIndex, totalSize, headSize} +} + +func filePathForIndex(headPath string, index int, maxIndex int) string { + if index == maxIndex { + return headPath + } + return fmt.Sprintf("%v.%03d", headPath, index) +} + +//-------------------------------------------------------------------------------- + +// GroupReader provides an interface for reading from a Group. 
+type GroupReader struct { + *Group + mtx sync.Mutex + curIndex int + curFile *os.File + curReader *bufio.Reader + curLine []byte +} + +func newGroupReader(g *Group) *GroupReader { + return &GroupReader{ + Group: g, + curIndex: 0, + curFile: nil, + curReader: nil, + curLine: nil, + } +} + +// Close closes the GroupReader by closing the cursor file. +func (gr *GroupReader) Close() error { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + if gr.curReader != nil { + err := gr.curFile.Close() + gr.curIndex = 0 + gr.curReader = nil + gr.curFile = nil + gr.curLine = nil + return err + } + return nil +} + +// Read implements io.Reader, reading bytes from the current Reader +// incrementing index until enough bytes are read. +func (gr *GroupReader) Read(p []byte) (n int, err error) { + lenP := len(p) + if lenP == 0 { + return 0, errors.New("given empty slice") + } + + gr.mtx.Lock() + defer gr.mtx.Unlock() + + // Open file if not open yet + if gr.curReader == nil { + if err = gr.openFile(gr.curIndex); err != nil { + return 0, err + } + } + + // Iterate over files until enough bytes are read + var nn int + for { + nn, err = gr.curReader.Read(p[n:]) + n += nn + if err == io.EOF { + if n >= lenP { + return n, nil + } + // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return n, err1 + } + } else if err != nil { + return n, err + } else if nn == 0 { // empty file + return n, err + } + } +} + +// ReadLine reads a line (without delimiter). +// just return io.EOF if no new lines found. 
+func (gr *GroupReader) ReadLine() (string, error) { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + // From PushLine + if gr.curLine != nil { + line := string(gr.curLine) + gr.curLine = nil + return line, nil + } + + // Open file if not open yet + if gr.curReader == nil { + err := gr.openFile(gr.curIndex) + if err != nil { + return "", err + } + } + + // Iterate over files until line is found + var linePrefix string + for { + bytesRead, err := gr.curReader.ReadBytes('\n') + if err == io.EOF { + // Open the next file + if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { + return "", err1 + } + if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { + return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil + } + linePrefix += string(bytesRead) + continue + } else if err != nil { + return "", err + } + return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil + } +} + +// IF index > gr.Group.maxIndex, returns io.EOF +// CONTRACT: caller should hold gr.mtx +func (gr *GroupReader) openFile(index int) error { + // Lock on Group to ensure that head doesn't move in the meanwhile. + gr.Group.mtx.Lock() + defer gr.Group.mtx.Unlock() + + if index > gr.Group.maxIndex { + return io.EOF + } + + curFilePath := filePathForIndex(gr.Head.Path, index, gr.Group.maxIndex) + curFile, err := os.OpenFile(curFilePath, os.O_RDONLY|os.O_CREATE, autoFilePerms) + if err != nil { + return err + } + curReader := bufio.NewReader(curFile) + + // Update gr.cur* + if gr.curFile != nil { + gr.curFile.Close() + } + gr.curIndex = index + gr.curFile = curFile + gr.curReader = curReader + gr.curLine = nil + return nil +} + +// PushLine makes the given line the current one, so the next time somebody +// calls ReadLine, this line will be returned. +// panics if called twice without calling ReadLine. 
+func (gr *GroupReader) PushLine(line string) { + gr.mtx.Lock() + defer gr.mtx.Unlock() + + if gr.curLine == nil { + gr.curLine = []byte(line) + } else { + panic("PushLine failed, already have line") + } +} + +// CurIndex returns cursor's file index. +func (gr *GroupReader) CurIndex() int { + gr.mtx.Lock() + defer gr.mtx.Unlock() + return gr.curIndex +} + +// SetIndex sets the cursor's file index to index by opening a file at this +// position. +func (gr *GroupReader) SetIndex(index int) error { + gr.mtx.Lock() + defer gr.mtx.Unlock() + return gr.openFile(index) +} + +//-------------------------------------------------------------------------------- + +// A simple SearchFunc that assumes that the marker is of form +// . +// For example, if prefix is '#HEIGHT:', the markers of expected to be of the form: +// +// #HEIGHT:1 +// ... +// #HEIGHT:2 +// ... +func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { + return func(line string) (int, error) { + if !strings.HasPrefix(line, prefix) { + return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix) + } + i, err := strconv.Atoi(line[len(prefix):]) + if err != nil { + return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error()) + } + if target < i { + return 1, nil + } else if target == i { + return 0, nil + } else { + return -1, nil + } + } +} diff --git a/chains/tendermint_34/libs/autofile/group_test.go b/chains/tendermint_34/libs/autofile/group_test.go new file mode 100755 index 0000000..e173e49 --- /dev/null +++ b/chains/tendermint_34/libs/autofile/group_test.go @@ -0,0 +1,437 @@ +package autofile + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group { + testID := cmn.RandStr(12) + testDir := "_test_" + testID + err := 
cmn.EnsureDir(testDir, 0700) + require.NoError(t, err, "Error creating dir") + + headPath := testDir + "/myfile" + g, err := OpenGroup(headPath, GroupHeadSizeLimit(headSizeLimit)) + require.NoError(t, err, "Error opening Group") + require.NotEqual(t, nil, g, "Failed to create Group") + + return g +} + +func destroyTestGroup(t *testing.T, g *Group) { + g.Close() + + err := os.RemoveAll(g.Dir) + require.NoError(t, err, "Error removing test Group directory") +} + +func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, totalSize, headSize int64) { + assert.Equal(t, minIndex, gInfo.MinIndex) + assert.Equal(t, maxIndex, gInfo.MaxIndex) + assert.Equal(t, totalSize, gInfo.TotalSize) + assert.Equal(t, headSize, gInfo.HeadSize) +} + +func TestCheckHeadSizeLimit(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 1000*1000) + + // At first, there are no files. + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) + + // Write 1000 bytes 999 times. + for i := 0; i < 999; i++ { + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + } + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + + // Even calling checkHeadSizeLimit manually won't rotate it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) + + // Write 1000 more bytes. + err := g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + g.Flush() + + // Calling checkHeadSizeLimit this time rolls it. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) + + // Write 1000 more bytes. + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + g.Flush() + + // Calling checkHeadSizeLimit does nothing. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) + + // Write 1000 bytes 999 times. 
+ for i := 0; i < 999; i++ { + err = g.WriteLine(cmn.RandStr(999)) + require.NoError(t, err, "Error appending to head") + } + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) + + // Calling checkHeadSizeLimit rolls it again. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) + + // Write 1000 more bytes. + _, err = g.Head.Write([]byte(cmn.RandStr(999) + "\n")) + require.NoError(t, err, "Error appending to head") + g.Flush() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) + + // Calling checkHeadSizeLimit does nothing. + g.checkHeadSizeLimit() + assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestSearch(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 10*1000) + + // Create some files in the group that have several INFO lines in them. + // Try to put the INFO lines in various spots. + for i := 0; i < 100; i++ { + // The random junk at the end ensures that this INFO linen + // is equally likely to show up at the end. 
+ _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123)))) + require.NoError(t, err, "Failed to write to head") + g.checkHeadSizeLimit() + for j := 0; j < 10; j++ { + _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n")) + require.NoError(t, err1, "Failed to write to head") + g.checkHeadSizeLimit() + } + } + + // Create a search func that searches for line + makeSearchFunc := func(target int) SearchFunc { + return func(line string) (int, error) { + parts := strings.Split(line, " ") + if len(parts) != 3 { + return -1, errors.New("Line did not have 3 parts") + } + i, err := strconv.Atoi(parts[1]) + if err != nil { + return -1, errors.New("Failed to parse INFO: " + err.Error()) + } + if target < i { + return 1, nil + } else if target == i { + return 0, nil + } else { + return -1, nil + } + } + } + + // Now search for each number + for i := 0; i < 100; i++ { + gr, match, err := g.Search("INFO", makeSearchFunc(i)) + require.NoError(t, err, "Failed to search for line, tc #%d", i) + assert.True(t, match, "Expected Search to return exact match, tc #%d", i) + line, err := gr.ReadLine() + require.NoError(t, err, "Failed to read line after search, tc #%d", i) + if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) { + t.Fatalf("Failed to get correct line, tc #%d", i) + } + // Make sure we can continue to read from there. + cur := i + 1 + for { + line, err := gr.ReadLine() + if err == io.EOF { + if cur == 99+1 { + // OK! + break + } else { + t.Fatalf("Got EOF after the wrong INFO #, tc #%d", i) + } + } else if err != nil { + t.Fatalf("Error reading line, tc #%d, err:\n%s", i, err) + } + if !strings.HasPrefix(line, "INFO ") { + continue + } + if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) { + t.Fatalf("Unexpected INFO #. Expected %v got:\n%v, tc #%d", cur, line, i) + } + cur++ + } + gr.Close() + } + + // Now search for something that is too small. + // We should get the first available line. 
+ { + gr, match, err := g.Search("INFO", makeSearchFunc(-999)) + require.NoError(t, err, "Failed to search for line") + assert.False(t, match, "Expected Search to not return exact match") + line, err := gr.ReadLine() + require.NoError(t, err, "Failed to read line after search") + if !strings.HasPrefix(line, "INFO 0 ") { + t.Error("Failed to fetch correct line, which is the earliest INFO") + } + err = gr.Close() + require.NoError(t, err, "Failed to close GroupReader") + } + + // Now search for something that is too large. + // We should get an EOF error. + { + gr, _, err := g.Search("INFO", makeSearchFunc(999)) + assert.Equal(t, io.EOF, err) + assert.Nil(t, gr) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestRotateFile(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + // Read g.Head.Path+"000" + body1, err := ioutil.ReadFile(g.Head.Path + ".000") + assert.NoError(t, err, "Failed to read first rolled file") + if string(body1) != "Line 1\nLine 2\nLine 3\n" { + t.Errorf("Got unexpected contents: [%v]", string(body1)) + } + + // Read g.Head.Path + body2, err := ioutil.ReadFile(g.Head.Path) + assert.NoError(t, err, "Failed to read first rolled file") + if string(body2) != "Line 4\nLine 5\nLine 6\n" { + t.Errorf("Got unexpected contents: [%v]", string(body2)) + } + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast1(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("# a") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.WriteLine("# b") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + 
destroyTestGroup(t, g) +} + +func TestFindLast2(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("# a") + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("# b") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast3(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("# a") + g.WriteLine("Line 2") + g.WriteLine("# b") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.True(t, found) + assert.Equal(t, "# b", match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestFindLast4(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + g.WriteLine("Line 1") + g.WriteLine("Line 2") + g.WriteLine("Line 3") + g.Flush() + g.RotateFile() + g.WriteLine("Line 4") + g.WriteLine("Line 5") + g.WriteLine("Line 6") + g.Flush() + + match, found, err := g.FindLast("#") + assert.NoError(t, err) + assert.False(t, found) + assert.Empty(t, match) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestWrite(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + written := []byte("Medusa") + g.Write(written) + g.Flush() + + read := make([]byte, len(written)) + gr, err := g.NewReader(0) + require.NoError(t, err, "failed to create reader") + + _, err = gr.Read(read) + assert.NoError(t, err, "failed to read data") + assert.Equal(t, written, read) + + // Cleanup + destroyTestGroup(t, g) +} + +// test that Read reads the required amount of bytes from all the files in the +// group and returns no error if n == size of the given slice. 
+func TestGroupReaderRead(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + professor := []byte("Professor Monster") + g.Write(professor) + g.Flush() + g.RotateFile() + frankenstein := []byte("Frankenstein's Monster") + g.Write(frankenstein) + g.Flush() + + totalWrittenLength := len(professor) + len(frankenstein) + read := make([]byte, totalWrittenLength) + gr, err := g.NewReader(0) + require.NoError(t, err, "failed to create reader") + + n, err := gr.Read(read) + assert.NoError(t, err, "failed to read data") + assert.Equal(t, totalWrittenLength, n, "not enough bytes read") + professorPlusFrankenstein := professor + professorPlusFrankenstein = append(professorPlusFrankenstein, frankenstein...) + assert.Equal(t, professorPlusFrankenstein, read) + + // Cleanup + destroyTestGroup(t, g) +} + +// test that Read returns an error if number of bytes read < size of +// the given slice. Subsequent call should return 0, io.EOF. +func TestGroupReaderRead2(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + professor := []byte("Professor Monster") + g.Write(professor) + g.Flush() + g.RotateFile() + frankenstein := []byte("Frankenstein's Monster") + frankensteinPart := []byte("Frankenstein") + g.Write(frankensteinPart) // note writing only a part + g.Flush() + + totalLength := len(professor) + len(frankenstein) + read := make([]byte, totalLength) + gr, err := g.NewReader(0) + require.NoError(t, err, "failed to create reader") + + // 1) n < (size of the given slice), io.EOF + n, err := gr.Read(read) + assert.Equal(t, io.EOF, err) + assert.Equal(t, len(professor)+len(frankensteinPart), n, "Read more/less bytes than it is in the group") + + // 2) 0, io.EOF + n, err = gr.Read([]byte("0")) + assert.Equal(t, io.EOF, err) + assert.Equal(t, 0, n) + + // Cleanup + destroyTestGroup(t, g) +} + +func TestMinIndex(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning") + + // 
Cleanup + destroyTestGroup(t, g) +} + +func TestMaxIndex(t *testing.T) { + g := createTestGroupWithHeadSizeLimit(t, 0) + + assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning") + + g.WriteLine("Line 1") + g.Flush() + g.RotateFile() + + assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file") + + // Cleanup + destroyTestGroup(t, g) +} diff --git a/chains/tendermint_34/libs/bech32/bech32.go b/chains/tendermint_34/libs/bech32/bech32.go new file mode 100755 index 0000000..a4db86d --- /dev/null +++ b/chains/tendermint_34/libs/bech32/bech32.go @@ -0,0 +1,29 @@ +package bech32 + +import ( + "github.com/btcsuite/btcutil/bech32" + "github.com/pkg/errors" +) + +//ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 +func ConvertAndEncode(hrp string, data []byte) (string, error) { + converted, err := bech32.ConvertBits(data, 8, 5, true) + if err != nil { + return "", errors.Wrap(err, "encoding bech32 failed") + } + return bech32.Encode(hrp, converted) + +} + +//DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes +func DecodeAndConvert(bech string) (string, []byte, error) { + hrp, data, err := bech32.Decode(bech) + if err != nil { + return "", nil, errors.Wrap(err, "decoding bech32 failed") + } + converted, err := bech32.ConvertBits(data, 5, 8, false) + if err != nil { + return "", nil, errors.Wrap(err, "decoding bech32 failed") + } + return hrp, converted, nil +} diff --git a/chains/tendermint_34/libs/bech32/bech32_test.go b/chains/tendermint_34/libs/bech32/bech32_test.go new file mode 100755 index 0000000..8309420 --- /dev/null +++ b/chains/tendermint_34/libs/bech32/bech32_test.go @@ -0,0 +1,31 @@ +package bech32_test + +import ( + "bytes" + "crypto/sha256" + "testing" + + "github.com/tendermint/tendermint/libs/bech32" +) + +func TestEncodeAndDecode(t *testing.T) { + + sum := sha256.Sum256([]byte("hello world\n")) + + bech, err := 
bech32.ConvertAndEncode("shasum", sum[:]) + + if err != nil { + t.Error(err) + } + hrp, data, err := bech32.DecodeAndConvert(bech) + + if err != nil { + t.Error(err) + } + if hrp != "shasum" { + t.Error("Invalid hrp") + } + if !bytes.Equal(data, sum[:]) { + t.Error("Invalid decode") + } +} diff --git a/chains/tendermint_34/libs/circle.yml b/chains/tendermint_34/libs/circle.yml new file mode 100755 index 0000000..390ffb0 --- /dev/null +++ b/chains/tendermint_34/libs/circle.yml @@ -0,0 +1,21 @@ +machine: + environment: + GOPATH: "${HOME}/.go_workspace" + PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" + PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + hosts: + localhost: 127.0.0.1 + +dependencies: + override: + - mkdir -p "$PROJECT_PARENT_PATH" + - ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH" + post: + - go version + +test: + override: + - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh + post: + - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt + - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/chains/tendermint_34/libs/cli/flags/log_level.go b/chains/tendermint_34/libs/cli/flags/log_level.go new file mode 100755 index 0000000..156106a --- /dev/null +++ b/chains/tendermint_34/libs/cli/flags/log_level.go @@ -0,0 +1,86 @@ +package flags + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultLogLevelKey = "*" +) + +// ParseLogLevel parses complex log level - comma-separated +// list of module:level pairs with an optional *:level pair (* means +// all other modules). 
+// +// Example: +// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") +func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { + if lvl == "" { + return nil, errors.New("Empty log level") + } + + l := lvl + + // prefix simple one word levels (e.g. "info") with "*" + if !strings.Contains(l, ":") { + l = defaultLogLevelKey + ":" + l + } + + options := make([]log.Option, 0) + + isDefaultLogLevelSet := false + var option log.Option + var err error + + list := strings.Split(l, ",") + for _, item := range list { + moduleAndLevel := strings.Split(item, ":") + + if len(moduleAndLevel) != 2 { + return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list) + } + + module := moduleAndLevel[0] + level := moduleAndLevel[1] + + if module == defaultLogLevelKey { + option, err = log.AllowLevel(level) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l)) + } + options = append(options, option) + isDefaultLogLevelSet = true + } else { + switch level { + case "debug": + option = log.AllowDebugWith("module", module) + case "info": + option = log.AllowInfoWith("module", module) + case "error": + option = log.AllowErrorWith("module", module) + case "none": + option = log.AllowNoneWith("module", module) + default: + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list) + } + options = append(options, option) + + } + } + + // if "*" is not provided, set default global level + if !isDefaultLogLevelSet { + option, err = log.AllowLevel(defaultLogLevelValue) + if err != nil { + return nil, err + } + options = append(options, option) + } + + return log.NewFilter(logger, options...), nil +} diff --git a/chains/tendermint_34/libs/cli/flags/log_level_test.go 
b/chains/tendermint_34/libs/cli/flags/log_level_test.go new file mode 100755 index 0000000..c4c1707 --- /dev/null +++ b/chains/tendermint_34/libs/cli/flags/log_level_test.go @@ -0,0 +1,94 @@ +package flags_test + +import ( + "bytes" + "strings" + "testing" + + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + defaultLogLevelValue = "info" +) + +func TestParseLogLevel(t *testing.T) { + var buf bytes.Buffer + jsonLogger := log.NewTMJSONLogger(&buf) + + correctLogLevels := []struct { + lvl string + expectedLogLines []string + }{ + {"mempool:error", []string{ + ``, // if no default is given, assume info + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, // if no default is given, assume info + ``}}, + + {"mempool:error,*:debug", []string{ + `{"_msg":"Kingpin","level":"debug","module":"wire"}`, + ``, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, + + {"*:debug,wire:none", []string{ + ``, + `{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`, + `{"_msg":"Mesmero","level":"error","module":"mempool"}`, + `{"_msg":"Mind","level":"info","module":"state"}`, + `{"_msg":"Gideon","level":"debug"}`}}, + } + + for _, c := range correctLogLevels { + logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger, defaultLogLevelValue) + if err != nil { + t.Fatal(err) + } + + buf.Reset() + + logger.With("module", "mempool").With("module", "wire").Debug("Kingpin") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "mempool").Info("Kitty Pryde") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], 
have, c.lvl) + } + + buf.Reset() + + logger.With("module", "mempool").Error("Mesmero") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl) + } + + buf.Reset() + + logger.With("module", "state").Info("Mind") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[3] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[3], have, c.lvl) + } + + buf.Reset() + + logger.Debug("Gideon") + if have := strings.TrimSpace(buf.String()); c.expectedLogLines[4] != have { + t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[4], have, c.lvl) + } + } + + incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} + for _, lvl := range incorrectLogLevel { + if _, err := tmflags.ParseLogLevel(lvl, jsonLogger, defaultLogLevelValue); err == nil { + t.Fatalf("Expected %s to produce error", lvl) + } + } +} diff --git a/chains/tendermint_34/libs/cli/helper.go b/chains/tendermint_34/libs/cli/helper.go new file mode 100755 index 0000000..878cf26 --- /dev/null +++ b/chains/tendermint_34/libs/cli/helper.go @@ -0,0 +1,87 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// WriteConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func WriteConfigVals(dir string, vals map[string]string) error { + data := "" + for k, v := range vals { + data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + } + cfile := filepath.Join(dir, "config.toml") + return ioutil.WriteFile(cfile, []byte(data), 0666) +} + +// RunWithArgs executes the given command with the specified command line args +// and environmental variables set. 
It returns any error returned from cmd.Execute() +func RunWithArgs(cmd Executable, args []string, env map[string]string) error { + oargs := os.Args + oenv := map[string]string{} + // defer returns the environment back to normal + defer func() { + os.Args = oargs + for k, v := range oenv { + os.Setenv(k, v) + } + }() + + // set the args and env how we want them + os.Args = args + for k, v := range env { + // backup old value if there, to restore at end + oenv[k] = os.Getenv(k) + err := os.Setenv(k, v) + if err != nil { + return err + } + } + + // and finally run the command + return cmd.Execute() +} + +// RunCaptureWithArgs executes the given command with the specified command +// line args and environmental variables set. It returns string fields +// representing output written to stdout and stderr, additionally any error +// from cmd.Execute() is also returned +func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { + oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout + rOut, wOut, _ := os.Pipe() + rErr, wErr, _ := os.Pipe() + os.Stdout, os.Stderr = wOut, wErr + defer func() { + os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout + }() + + // copy the output in a separate goroutine so printing can't block indefinitely + copyStd := func(reader *os.File) *(chan string) { + stdC := make(chan string) + go func() { + var buf bytes.Buffer + // io.Copy will end when we call reader.Close() below + io.Copy(&buf, reader) + stdC <- buf.String() + }() + return &stdC + } + outC := copyStd(rOut) + errC := copyStd(rErr) + + // now run the command + err = RunWithArgs(cmd, args, env) + + // and grab the stdout to return + wOut.Close() + wErr.Close() + stdout = <-*outC + stderr = <-*errC + return stdout, stderr, err +} diff --git a/chains/tendermint_34/libs/cli/setup.go b/chains/tendermint_34/libs/cli/setup.go new file mode 100755 index 0000000..06cf1cd --- /dev/null +++ 
b/chains/tendermint_34/libs/cli/setup.go
@@ -0,0 +1,157 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+const (
+	HomeFlag     = "home"
+	TraceFlag    = "trace"
+	OutputFlag   = "output"
+	EncodingFlag = "encoding"
+)
+
+// Executable is the minimal interface to *cobra.Command, so we can
+// wrap if desired before the test
+type Executable interface {
+	Execute() error
+}
+
+// PrepareBaseCmd is meant for tendermint and other servers
+func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor {
+	cobra.OnInitialize(func() { initEnv(envPrefix) })
+	cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data")
+	cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors")
+	cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE)
+	return Executor{cmd, os.Exit}
+}
+
+// PrepareMainCmd is meant for client side libs that want some more flags
+//
+// This adds --encoding (hex, btc, base64) and --output (text, json) to
+// the command. These only really make sense in interactive commands.
+func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor {
+	cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)")
+	cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)")
+	cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE)
+	return PrepareBaseCmd(cmd, envPrefix, defaultHome)
+}
+
+// initEnv sets to use ENV variables if set.
+func initEnv(prefix string) {
+	copyEnvVars(prefix)
+
+	// env variables with TM prefix (eg. TM_ROOT)
+	viper.SetEnvPrefix(prefix)
+	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
+	viper.AutomaticEnv()
+}
+
+// This copies all variables like TMROOT to TM_ROOT,
+// so we can support both formats for the user
+func copyEnvVars(prefix string) {
+	prefix = strings.ToUpper(prefix)
+	ps := prefix + "_"
+	for _, e := range os.Environ() {
+		kv := strings.SplitN(e, "=", 2)
+		if len(kv) == 2 {
+			k, v := kv[0], kv[1]
+			if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) {
+				k2 := strings.Replace(k, prefix, ps, 1)
+				os.Setenv(k2, v)
+			}
+		}
+	}
+}
+
+// Executor wraps the cobra Command with a nicer Execute method
+type Executor struct {
+	*cobra.Command
+	Exit func(int) // this is os.Exit by default, override in tests
+}
+
+type ExitCoder interface {
+	ExitCode() int
+}
+
+// execute adds all child commands to the root command sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func (e Executor) Execute() error {
+	e.SilenceUsage = true
+	e.SilenceErrors = true
+	err := e.Command.Execute()
+	if err != nil {
+		if viper.GetBool(TraceFlag) {
+			fmt.Fprintf(os.Stderr, "ERROR: %+v\n", err)
+		} else {
+			fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
+		}
+
+		// return error code 1 by default, can override it with a special error type
+		exitCode := 1
+		if ec, ok := err.(ExitCoder); ok {
+			exitCode = ec.ExitCode()
+		}
+		e.Exit(exitCode)
+	}
+	return err
+}
+
+type cobraCmdFunc func(cmd *cobra.Command, args []string) error
+
+// Returns a single function that calls each argument function in sequence
+// RunE, PreRunE, PersistentPreRunE, etc. all have this same signature
+func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc {
+	return func(cmd *cobra.Command, args []string) error {
+		for _, f := range fs {
+			if f != nil {
+				if err := f(cmd, args); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+}
+
+// Bind all flags and read the config into viper
+func bindFlagsLoadViper(cmd *cobra.Command, args []string) error {
+	// cmd.Flags() includes flags from this command and all persistent flags from the parent
+	if err := viper.BindPFlags(cmd.Flags()); err != nil {
+		return err
+	}
+
+	homeDir := viper.GetString(HomeFlag)
+	viper.Set(HomeFlag, homeDir)
+	viper.SetConfigName("config")                         // name of config file (without extension)
+	viper.AddConfigPath(homeDir)                          // search root directory
+	viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config
+
+	// If a config file is found, read it in.
+	if err := viper.ReadInConfig(); err == nil {
+		// stderr, so if we redirect output to json file, this doesn't appear
+		// fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed())
+	} else if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
+		// ignore not found error, return other errors
+		return err
+	}
+	return nil
+}
+
+func validateOutput(cmd *cobra.Command, args []string) error {
+	// validate output format
+	output := viper.GetString(OutputFlag)
+	switch output {
+	case "text", "json":
+	default:
+		return errors.Errorf("Unsupported output format: %s", output)
+	}
+	return nil
+}
diff --git a/chains/tendermint_34/libs/cli/setup_test.go b/chains/tendermint_34/libs/cli/setup_test.go
new file mode 100755
index 0000000..04209e4
--- /dev/null
+++ b/chains/tendermint_34/libs/cli/setup_test.go
@@ -0,0 +1,237 @@
+package cli
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func
TestSetupEnv(t *testing.T) { + cases := []struct { + args []string + env map[string]string + expected string + }{ + {nil, nil, ""}, + {[]string{"--foobar", "bang!"}, nil, "bang!"}, + // make sure reset is good + {nil, nil, ""}, + // test both variants of the prefix + {nil, map[string]string{"DEMO_FOOBAR": "good"}, "good"}, + {nil, map[string]string{"DEMOFOOBAR": "silly"}, "silly"}, + // and that cli overrides env... + {[]string{"--foobar", "important"}, + map[string]string{"DEMO_FOOBAR": "ignored"}, "important"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo string + demo := &cobra.Command{ + Use: "demo", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("foobar") + return nil + }, + } + demo.Flags().String("foobar", "", "Some test value from config") + cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + } +} + +func tempDir() string { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + panic(err) + } + return cdir +} + +func TestSetupConfig(t *testing.T) { + // we pre-create two config files we can refer to in the rest of + // the test cases. 
+ cval1 := "fubble" + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) + require.Nil(t, err) + + cases := []struct { + args []string + env map[string]string + expected string + expectedTwo string + }{ + {nil, nil, "", ""}, + // setting on the command line + {[]string{"--boo", "haha"}, nil, "haha", ""}, + {[]string{"--two-words", "rocks"}, nil, "", "rocks"}, + {[]string{"--home", conf1}, nil, cval1, ""}, + // test both variants of the prefix + {nil, map[string]string{"RD_BOO": "bang"}, "bang", ""}, + {nil, map[string]string{"RD_TWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RDTWO_WORDS": "fly"}, "", "fly"}, + {nil, map[string]string{"RD_HOME": conf1}, cval1, ""}, + {nil, map[string]string{"RDHOME": conf1}, cval1, ""}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + var foo, two string + boo := &cobra.Command{ + Use: "reader", + RunE: func(cmd *cobra.Command, args []string) error { + foo = viper.GetString("boo") + two = viper.GetString("two-words") + return nil + }, + } + boo.Flags().String("boo", "", "Some test value from config") + boo.Flags().String("two-words", "", "Check out env handling -") + cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, foo, i) + assert.Equal(t, tc.expectedTwo, two, i) + } +} + +type DemoConfig struct { + Name string `mapstructure:"name"` + Age int `mapstructure:"age"` + Unused int `mapstructure:"unused"` +} + +func TestSetupUnmarshal(t *testing.T) { + // we pre-create two config files we can refer to in the rest of + // the test cases. 
+ cval1, cval2 := "someone", "else" + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"name": cval1}) + require.Nil(t, err) + // even with some ignored fields, should be no problem + conf2 := tempDir() + err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) + require.Nil(t, err) + + // unused is not declared on a flag and remains from base + base := DemoConfig{ + Name: "default", + Age: 42, + Unused: -7, + } + c := func(name string, age int) DemoConfig { + r := base + // anything set on the flags as a default is used over + // the default config object + r.Name = "from-flag" + if name != "" { + r.Name = name + } + if age != 0 { + r.Age = age + } + return r + } + + cases := []struct { + args []string + env map[string]string + expected DemoConfig + }{ + {nil, nil, c("", 0)}, + // setting on the command line + {[]string{"--name", "haha"}, nil, c("haha", 0)}, + {[]string{"--home", conf1}, nil, c(cval1, 0)}, + // test both variants of the prefix + {nil, map[string]string{"MR_AGE": "56"}, c("", 56)}, + {nil, map[string]string{"MR_HOME": conf1}, c(cval1, 0)}, + {[]string{"--age", "17"}, map[string]string{"MRHOME": conf2}, c(cval2, 17)}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + cfg := base + marsh := &cobra.Command{ + Use: "marsh", + RunE: func(cmd *cobra.Command, args []string) error { + return viper.Unmarshal(&cfg) + }, + } + marsh.Flags().String("name", "from-flag", "Some test value from config") + // if we want a flag to use the proper default, then copy it + // from the default config here + marsh.Flags().Int("age", base.Age, "Some test value from config") + cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) 
+ err := RunWithArgs(cmd, args, tc.env) + require.Nil(t, err, i) + assert.Equal(t, tc.expected, cfg, i) + } +} + +func TestSetupTrace(t *testing.T) { + cases := []struct { + args []string + env map[string]string + long bool + expected string + }{ + {nil, nil, false, "Trace flag = false"}, + {[]string{"--trace"}, nil, true, "Trace flag = true"}, + {[]string{"--no-such-flag"}, nil, false, "unknown flag: --no-such-flag"}, + {nil, map[string]string{"DBG_TRACE": "true"}, true, "Trace flag = true"}, + } + + for idx, tc := range cases { + i := strconv.Itoa(idx) + // test command that store value of foobar in local variable + trace := &cobra.Command{ + Use: "trace", + RunE: func(cmd *cobra.Command, args []string) error { + return errors.Errorf("Trace flag = %t", viper.GetBool(TraceFlag)) + }, + } + cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. + cmd.Exit = func(int) {} + + viper.Reset() + args := append([]string{cmd.Use}, tc.args...) + stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) + require.NotNil(t, err, i) + require.Equal(t, "", stdout, i) + require.NotEqual(t, "", stderr, i) + msg := strings.Split(stderr, "\n") + desired := fmt.Sprintf("ERROR: %s", tc.expected) + assert.Equal(t, desired, msg[0], i) + if tc.long && assert.True(t, len(msg) > 2, i) { + // the next line starts the stack trace... 
+ assert.Contains(t, msg[1], "TestSetupTrace", i) + assert.Contains(t, msg[2], "setup_test.go", i) + } + } +} diff --git a/chains/tendermint_34/libs/clist/bench_test.go b/chains/tendermint_34/libs/clist/bench_test.go new file mode 100755 index 0000000..95973cc --- /dev/null +++ b/chains/tendermint_34/libs/clist/bench_test.go @@ -0,0 +1,46 @@ +package clist + +import "testing" + +func BenchmarkDetaching(b *testing.B) { + lst := New() + for i := 0; i < b.N+1; i++ { + lst.PushBack(i) + } + start := lst.Front() + nxt := start.Next() + b.ResetTimer() + for i := 0; i < b.N; i++ { + start.removed = true + start.DetachNext() + start.DetachPrev() + tmp := nxt + nxt = nxt.Next() + start = tmp + } +} + +// This is used to benchmark the time of RMutex. +func BenchmarkRemoved(b *testing.B) { + lst := New() + for i := 0; i < b.N+1; i++ { + lst.PushBack(i) + } + start := lst.Front() + nxt := start.Next() + b.ResetTimer() + for i := 0; i < b.N; i++ { + start.Removed() + tmp := nxt + nxt = nxt.Next() + start = tmp + } +} + +func BenchmarkPushBack(b *testing.B) { + lst := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lst.PushBack(i) + } +} diff --git a/chains/tendermint_34/libs/clist/clist.go b/chains/tendermint_34/libs/clist/clist.go new file mode 100755 index 0000000..393bdf7 --- /dev/null +++ b/chains/tendermint_34/libs/clist/clist.go @@ -0,0 +1,407 @@ +package clist + +/* + +The purpose of CList is to provide a goroutine-safe linked-list. +This list can be traversed concurrently by any number of goroutines. +However, removed CElements cannot be added back. +NOTE: Not all methods of container/list are (yet) implemented. +NOTE: Removed elements need to DetachPrev or DetachNext consistently +to ensure garbage collection of removed elements. + +*/ + +import ( + "fmt" + "sync" +) + +// MaxLength is the max allowed number of elements a linked list is +// allowed to contain. +// If more elements are pushed to the list it will panic. 
+const MaxLength = int(^uint(0) >> 1) + +/* + +CElement is an element of a linked-list +Traversal from a CElement is goroutine-safe. + +We can't avoid using WaitGroups or for-loops given the documentation +spec without re-implementing the primitives that already exist in +golang/sync. Notice that WaitGroup allows many go-routines to be +simultaneously released, which is what we want. Mutex doesn't do +this. RWMutex does this, but it's clumsy to use in the way that a +WaitGroup would be used -- and we'd end up having two RWMutex's for +prev/next each, which is doubly confusing. + +sync.Cond would be sort-of useful, but we don't need a write-lock in +the for-loop. Use sync.Cond when you need serial access to the +"condition". In our case our condition is if `next != nil || removed`, +and there's no reason to serialize that condition for goroutines +waiting on NextWait() (since it's just a read operation). + +*/ +type CElement struct { + mtx sync.RWMutex + prev *CElement + prevWg *sync.WaitGroup + prevWaitCh chan struct{} + next *CElement + nextWg *sync.WaitGroup + nextWaitCh chan struct{} + removed bool + + Value interface{} // immutable +} + +// Blocking implementation of Next(). +// May return nil iff CElement was tail and got removed. +func (e *CElement) NextWait() *CElement { + for { + e.mtx.RLock() + next := e.next + nextWg := e.nextWg + removed := e.removed + e.mtx.RUnlock() + + if next != nil || removed { + return next + } + + nextWg.Wait() + // e.next doesn't necessarily exist here. + // That's why we need to continue a for-loop. + } +} + +// Blocking implementation of Prev(). +// May return nil iff CElement was head and got removed. +func (e *CElement) PrevWait() *CElement { + for { + e.mtx.RLock() + prev := e.prev + prevWg := e.prevWg + removed := e.removed + e.mtx.RUnlock() + + if prev != nil || removed { + return prev + } + + prevWg.Wait() + } +} + +// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, +// channel will be closed. 
+func (e *CElement) PrevWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prevWaitCh +} + +// NextWaitChan can be used to wait until Next becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) NextWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.nextWaitCh +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Next() *CElement { + e.mtx.RLock() + val := e.next + e.mtx.RUnlock() + return val +} + +// Nonblocking, may return nil if at the end. +func (e *CElement) Prev() *CElement { + e.mtx.RLock() + prev := e.prev + e.mtx.RUnlock() + return prev +} + +func (e *CElement) Removed() bool { + e.mtx.RLock() + isRemoved := e.removed + e.mtx.RUnlock() + return isRemoved +} + +func (e *CElement) DetachNext() { + e.mtx.Lock() + if !e.removed { + e.mtx.Unlock() + panic("DetachNext() must be called after Remove(e)") + } + e.next = nil + e.mtx.Unlock() +} + +func (e *CElement) DetachPrev() { + e.mtx.Lock() + if !e.removed { + e.mtx.Unlock() + panic("DetachPrev() must be called after Remove(e)") + } + e.prev = nil + e.mtx.Unlock() +} + +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on nextWg. +func (e *CElement) SetNext(newNext *CElement) { + e.mtx.Lock() + + oldNext := e.next + e.next = newNext + if oldNext != nil && newNext == nil { + // See https://golang.org/pkg/sync/: + // + // If a WaitGroup is reused to wait for several independent sets of + // events, new Add calls must happen after all previous Wait calls have + // returned. + e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. 
+ e.nextWaitCh = make(chan struct{}) + } + if oldNext == nil && newNext != nil { + e.nextWg.Done() + close(e.nextWaitCh) + } + e.mtx.Unlock() +} + +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on prevWg +func (e *CElement) SetPrev(newPrev *CElement) { + e.mtx.Lock() + + oldPrev := e.prev + e.prev = newPrev + if oldPrev != nil && newPrev == nil { + e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWaitCh = make(chan struct{}) + } + if oldPrev == nil && newPrev != nil { + e.prevWg.Done() + close(e.prevWaitCh) + } + e.mtx.Unlock() +} + +func (e *CElement) SetRemoved() { + e.mtx.Lock() + + e.removed = true + + // This wakes up anyone waiting in either direction. + if e.prev == nil { + e.prevWg.Done() + close(e.prevWaitCh) + } + if e.next == nil { + e.nextWg.Done() + close(e.nextWaitCh) + } + e.mtx.Unlock() +} + +//-------------------------------------------------------------------------------- + +// CList represents a linked list. +// The zero value for CList is an empty list ready to use. +// Operations are goroutine-safe. +// Panics if length grows beyond the max. +type CList struct { + mtx sync.RWMutex + wg *sync.WaitGroup + waitCh chan struct{} + head *CElement // first element + tail *CElement // last element + len int // list length + maxLen int // max list length +} + +func (l *CList) Init() *CList { + l.mtx.Lock() + + l.wg = waitGroup1() + l.waitCh = make(chan struct{}) + l.head = nil + l.tail = nil + l.len = 0 + l.mtx.Unlock() + return l +} + +// Return CList with MaxLength. CList will panic if it goes beyond MaxLength. +func New() *CList { return newWithMax(MaxLength) } + +// Return CList with given maxLength. +// Will panic if list exceeds given maxLength. 
+func newWithMax(maxLength int) *CList { + l := new(CList) + l.maxLen = maxLength + return l.Init() +} + +func (l *CList) Len() int { + l.mtx.RLock() + len := l.len + l.mtx.RUnlock() + return len +} + +func (l *CList) Front() *CElement { + l.mtx.RLock() + head := l.head + l.mtx.RUnlock() + return head +} + +func (l *CList) FrontWait() *CElement { + // Loop until the head is non-nil else wait and try again + for { + l.mtx.RLock() + head := l.head + wg := l.wg + l.mtx.RUnlock() + + if head != nil { + return head + } + wg.Wait() + // NOTE: If you think l.head exists here, think harder. + } +} + +func (l *CList) Back() *CElement { + l.mtx.RLock() + back := l.tail + l.mtx.RUnlock() + return back +} + +func (l *CList) BackWait() *CElement { + for { + l.mtx.RLock() + tail := l.tail + wg := l.wg + l.mtx.RUnlock() + + if tail != nil { + return tail + } + wg.Wait() + // l.tail doesn't necessarily exist here. + // That's why we need to continue a for-loop. + } +} + +// WaitChan can be used to wait until Front or Back becomes not nil. Once it +// does, channel will be closed. +func (l *CList) WaitChan() <-chan struct{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + return l.waitCh +} + +// Panics if list grows beyond its max length. +func (l *CList) PushBack(v interface{}) *CElement { + l.mtx.Lock() + + // Construct a new element + e := &CElement{ + prev: nil, + prevWg: waitGroup1(), + prevWaitCh: make(chan struct{}), + next: nil, + nextWg: waitGroup1(), + nextWaitCh: make(chan struct{}), + removed: false, + Value: v, + } + + // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { + l.wg.Done() + close(l.waitCh) + } + if l.len >= l.maxLen { + panic(fmt.Sprintf("clist: maximum length list reached %d", l.maxLen)) + } + l.len++ + + // Modify the tail + if l.tail == nil { + l.head = e + l.tail = e + } else { + e.SetPrev(l.tail) // We must init e first. + l.tail.SetNext(e) // This will make e accessible. + l.tail = e // Update the list. 
+ } + l.mtx.Unlock() + return e +} + +// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. +// NOTE: As per the contract of CList, removed elements cannot be added back. +func (l *CList) Remove(e *CElement) interface{} { + l.mtx.Lock() + + prev := e.Prev() + next := e.Next() + + if l.head == nil || l.tail == nil { + l.mtx.Unlock() + panic("Remove(e) on empty CList") + } + if prev == nil && l.head != e { + l.mtx.Unlock() + panic("Remove(e) with false head") + } + if next == nil && l.tail != e { + l.mtx.Unlock() + panic("Remove(e) with false tail") + } + + // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { + l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.waitCh = make(chan struct{}) + } + + // Update l.len + l.len-- + + // Connect next/prev and set head/tail + if prev == nil { + l.head = next + } else { + prev.SetNext(next) + } + if next == nil { + l.tail = prev + } else { + next.SetPrev(prev) + } + + // Set .Done() on e, otherwise waiters will wait forever. 
+ e.SetRemoved() + + l.mtx.Unlock() + return e.Value +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} diff --git a/chains/tendermint_34/libs/clist/clist_test.go b/chains/tendermint_34/libs/clist/clist_test.go new file mode 100755 index 0000000..4ded617 --- /dev/null +++ b/chains/tendermint_34/libs/clist/clist_test.go @@ -0,0 +1,307 @@ +package clist + +import ( + "fmt" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestPanicOnMaxLength(t *testing.T) { + maxLength := 1000 + + l := newWithMax(maxLength) + for i := 0; i < maxLength; i++ { + l.PushBack(1) + } + assert.Panics(t, func() { + l.PushBack(1) + }) +} + +func TestSmall(t *testing.T) { + l := New() + el1 := l.PushBack(1) + el2 := l.PushBack(2) + el3 := l.PushBack(3) + if l.Len() != 3 { + t.Error("Expected len 3, got ", l.Len()) + } + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r1 := l.Remove(el1) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r2 := l.Remove(el2) + + //fmt.Printf("%p %v\n", el1, el1) + //fmt.Printf("%p %v\n", el2, el2) + //fmt.Printf("%p %v\n", el3, el3) + + r3 := l.Remove(el3) + + if r1 != 1 { + t.Error("Expected 1, got ", r1) + } + if r2 != 2 { + t.Error("Expected 2, got ", r2) + } + if r3 != 3 { + t.Error("Expected 3, got ", r3) + } + if l.Len() != 0 { + t.Error("Expected len 0, got ", l.Len()) + } + +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +// nolint: megacheck +func _TestGCFifo(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := new(uint64) + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. 
+ type value struct { + Int int + } + done := make(chan struct{}) + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + atomic.AddUint64(gcCount, 1) + }) + } + + for el := l.Front(); el != nil; { + l.Remove(el) + //oldEl := el + el = el.Next() + //oldEl.DetachPrev() + //oldEl.DetachNext() + } + + runtime.GC() + time.Sleep(time.Second * 3) + runtime.GC() + time.Sleep(time.Second * 3) + _ = done + + if *gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + *gcCount) + } +} + +/* +This test is quite hacky because it relies on SetFinalizer +which isn't guaranteed to run at all. +*/ +// nolint: megacheck +func _TestGCRandom(t *testing.T) { + + const numElements = 1000000 + l := New() + gcCount := 0 + + // SetFinalizer doesn't work well with circular structures, + // so we construct a trivial non-circular structure to + // track. + type value struct { + Int int + } + + for i := 0; i < numElements; i++ { + v := new(value) + v.Int = i + l.PushBack(v) + runtime.SetFinalizer(v, func(v *value) { + gcCount++ + }) + } + + els := make([]*CElement, 0, numElements) + for el := l.Front(); el != nil; el = el.Next() { + els = append(els, el) + } + + for _, i := range cmn.RandPerm(numElements) { + el := els[i] + l.Remove(el) + _ = el.Next() + } + + runtime.GC() + time.Sleep(time.Second * 3) + + if gcCount != numElements { + t.Errorf("Expected gcCount to be %v, got %v", numElements, + gcCount) + } +} + +func TestScanRightDeleteRandom(t *testing.T) { + + const numElements = 1000 + const numTimes = 100 + const numScanners = 10 + + l := New() + stop := make(chan struct{}) + + els := make([]*CElement, numElements) + for i := 0; i < numElements; i++ { + el := l.PushBack(i) + els[i] = el + } + + // Launch scanner routines that will rapidly iterate over elements. 
+ for i := 0; i < numScanners; i++ { + go func(scannerID int) { + var el *CElement + restartCounter := 0 + counter := 0 + FOR_LOOP: + for { + select { + case <-stop: + fmt.Println("stopped") + break FOR_LOOP + default: + } + if el == nil { + el = l.FrontWait() + restartCounter++ + } + el = el.Next() + counter++ + } + fmt.Printf("Scanner %v restartCounter: %v counter: %v\n", scannerID, restartCounter, counter) + }(i) + } + + // Remove an element, push back an element. + for i := 0; i < numTimes; i++ { + // Pick an element to remove + rmElIdx := cmn.RandIntn(len(els)) + rmEl := els[rmElIdx] + + // Remove it + l.Remove(rmEl) + //fmt.Print(".") + + // Insert a new element + newEl := l.PushBack(-1*i - 1) + els[rmElIdx] = newEl + + if i%100000 == 0 { + fmt.Printf("Pushed %vK elements so far...\n", i/1000) + } + + } + + // Stop scanners + close(stop) + // time.Sleep(time.Second * 1) + + // And remove all the elements. + for el := l.Front(); el != nil; el = el.Next() { + l.Remove(el) + } + if l.Len() != 0 { + t.Fatal("Failed to remove all elements from CList") + } +} + +func TestWaitChan(t *testing.T) { + l := New() + ch := l.WaitChan() + + // 1) add one element to an empty list + go l.PushBack(1) + <-ch + + // 2) and remove it + el := l.Front() + v := l.Remove(el) + if v != 1 { + t.Fatal("where is 1 coming from?") + } + + // 3) test iterating forward and waiting for Next (NextWaitChan and Next) + el = l.PushBack(0) + + done := make(chan struct{}) + pushed := 0 + go func() { + for i := 1; i < 100; i++ { + l.PushBack(i) + pushed++ + time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond) + } + close(done) + }() + + next := el + seen := 0 +FOR_LOOP: + for { + select { + case <-next.NextWaitChan(): + next = next.Next() + seen++ + if next == nil { + continue + } + case <-done: + break FOR_LOOP + case <-time.After(10 * time.Second): + t.Fatal("max execution time") + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items 
(%d)", pushed, seen) + } + + // 4) test iterating backwards (PrevWaitChan and Prev) + prev := next + seen = 0 +FOR_LOOP2: + for { + select { + case <-prev.PrevWaitChan(): + prev = prev.Prev() + seen++ + if prev == nil { + t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") + } + case <-time.After(3 * time.Second): + break FOR_LOOP2 + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } +} diff --git a/chains/tendermint_34/libs/common/LICENSE b/chains/tendermint_34/libs/common/LICENSE new file mode 100755 index 0000000..8a142a7 --- /dev/null +++ b/chains/tendermint_34/libs/common/LICENSE @@ -0,0 +1,193 @@ +Tendermint Go-Common +Copyright (C) 2015 Tendermint + + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/chains/tendermint_34/libs/common/async.go b/chains/tendermint_34/libs/common/async.go new file mode 100755 index 0000000..e3293ab --- /dev/null +++ b/chains/tendermint_34/libs/common/async.go @@ -0,0 +1,175 @@ +package common + +import ( + "sync/atomic" +) + +//---------------------------------------- +// Task + +// val: the value returned after task execution. +// err: the error returned during task completion. +// abort: tells Parallel to return, whether or not all tasks have completed. +type Task func(i int) (val interface{}, err error, abort bool) + +type TaskResult struct { + Value interface{} + Error error +} + +type TaskResultCh <-chan TaskResult + +type taskResultOK struct { + TaskResult + OK bool +} + +type TaskResultSet struct { + chz []TaskResultCh + results []taskResultOK +} + +func newTaskResultSet(chz []TaskResultCh) *TaskResultSet { + return &TaskResultSet{ + chz: chz, + results: make([]taskResultOK, len(chz)), + } +} + +func (trs *TaskResultSet) Channels() []TaskResultCh { + return trs.chz +} + +func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { + if len(trs.results) <= index { + return TaskResult{}, false + } + resultOK := trs.results[index] + return resultOK.TaskResult, resultOK.OK +} + +// NOTE: Not concurrency safe. +// Writes results to trs.results without waiting for all tasks to complete. +func (trs *TaskResultSet) Reap() *TaskResultSet { + for i := 0; i < len(trs.results); i++ { + var trch = trs.chz[i] + select { + case result, ok := <-trch: + if ok { + // Write result. + trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + } else { + // We already wrote it. + } + default: + // Do nothing. + } + } + return trs +} + +// NOTE: Not concurrency safe. +// Like Reap() but waits until all tasks have returned or panic'd. +func (trs *TaskResultSet) Wait() *TaskResultSet { + for i := 0; i < len(trs.results); i++ { + var trch = trs.chz[i] + result, ok := <-trch + if ok { + // Write result. 
+ trs.results[i] = taskResultOK{ + TaskResult: result, + OK: true, + } + } else { + // We already wrote it. + } + } + return trs +} + +// Returns the firstmost (by task index) error as +// discovered by all previous Reap() calls. +func (trs *TaskResultSet) FirstValue() interface{} { + for _, result := range trs.results { + if result.Value != nil { + return result.Value + } + } + return nil +} + +// Returns the firstmost (by task index) error as +// discovered by all previous Reap() calls. +func (trs *TaskResultSet) FirstError() error { + for _, result := range trs.results { + if result.Error != nil { + return result.Error + } + } + return nil +} + +//---------------------------------------- +// Parallel + +// Run tasks in parallel, with ability to abort early. +// Returns ok=false iff any of the tasks returned abort=true. +// NOTE: Do not implement quit features here. Instead, provide convenient +// concurrent quit-like primitives, passed implicitly via Task closures. (e.g. +// it's not Parallel's concern how you quit/abort your tasks). +func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { + var taskResultChz = make([]TaskResultCh, len(tasks)) // To return. + var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. + var numPanics = new(int32) // Keep track of panics to set ok=false later. + ok = true // We will set it to false iff any tasks panic'd or returned abort. + + // Start all tasks in parallel in separate goroutines. + // When the task is complete, it will appear in the + // respective taskResultCh (associated by task index). + for i, task := range tasks { + var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result. + taskResultChz[i] = taskResultCh + go func(i int, task Task, taskResultCh chan TaskResult) { + // Recovery + defer func() { + if pnk := recover(); pnk != nil { + atomic.AddInt32(numPanics, 1) + // Send panic to taskResultCh. 
+ taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")} + // Closing taskResultCh lets trs.Wait() work. + close(taskResultCh) + // Decrement waitgroup. + taskDoneCh <- false + } + }() + // Run the task. + var val, err, abort = task(i) + // Send val/err to taskResultCh. + // NOTE: Below this line, nothing must panic/ + taskResultCh <- TaskResult{val, err} + // Closing taskResultCh lets trs.Wait() work. + close(taskResultCh) + // Decrement waitgroup. + taskDoneCh <- abort + }(i, task, taskResultCh) + } + + // Wait until all tasks are done, or until abort. + // DONE_LOOP: + for i := 0; i < len(tasks); i++ { + abort := <-taskDoneCh + if abort { + ok = false + break + } + } + + // Ok is also false if there were any panics. + // We must do this check here (after DONE_LOOP). + ok = ok && (atomic.LoadInt32(numPanics) == 0) + + return newTaskResultSet(taskResultChz).Reap(), ok +} diff --git a/chains/tendermint_34/libs/common/async_test.go b/chains/tendermint_34/libs/common/async_test.go new file mode 100755 index 0000000..4149eb9 --- /dev/null +++ b/chains/tendermint_34/libs/common/async_test.go @@ -0,0 +1,156 @@ +package common + +import ( + "errors" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestParallel(t *testing.T) { + + // Create tasks. + var counter = new(int32) + var tasks = make([]Task, 100*1000) + for i := 0; i < len(tasks); i++ { + tasks[i] = func(i int) (res interface{}, err error, abort bool) { + atomic.AddInt32(counter, 1) + return -1 * i, nil, false + } + } + + // Run in parallel. + var trs, ok = Parallel(tasks...) + assert.True(t, ok) + + // Verify. 
+ assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") + var failedTasks int + for i := 0; i < len(tasks); i++ { + taskResult, ok := trs.LatestResult(i) + if !ok { + assert.Fail(t, "Task #%v did not complete.", i) + failedTasks++ + } else if taskResult.Error != nil { + assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) + failedTasks++ + } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { + assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) + failedTasks++ + } else { + // Good! + } + } + assert.Equal(t, failedTasks, 0, "No task should have failed") + assert.Nil(t, trs.FirstError(), "There should be no errors") + assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") +} + +func TestParallelAbort(t *testing.T) { + + var flow1 = make(chan struct{}, 1) + var flow2 = make(chan struct{}, 1) + var flow3 = make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. + var flow4 = make(chan struct{}, 1) + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 0) + flow1 <- struct{}{} + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 1) + flow2 <- <-flow1 + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 2) + flow3 <- <-flow2 + return 2, nil, true + }, + func(i int) (res interface{}, err error, abort bool) { + assert.Equal(t, i, 3) + <-flow4 + return 3, nil, false + }, + } + + // Run in parallel. + var taskResultSet, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we aborted task #2.") + + // Verify task #3. + // Initially taskResultSet.chz[3] sends nothing since flow4 didn't send. + waitTimeout(t, taskResultSet.chz[3], "Task #3") + + // Now let the last task (#3) complete after abort. 
+ flow4 <- <-flow3 + + // Wait until all tasks have returned or panic'd. + taskResultSet.Wait() + + // Verify task #0, #1, #2. + checkResult(t, taskResultSet, 0, 0, nil, nil) + checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) + checkResult(t, taskResultSet, 2, 2, nil, nil) + checkResult(t, taskResultSet, 3, 3, nil, nil) +} + +func TestParallelRecover(t *testing.T) { + + // Create tasks. + var tasks = []Task{ + func(i int) (res interface{}, err error, abort bool) { + return 0, nil, false + }, + func(i int) (res interface{}, err error, abort bool) { + return 1, errors.New("some error"), false + }, + func(i int) (res interface{}, err error, abort bool) { + panic(2) + }, + } + + // Run in parallel. + var taskResultSet, ok = Parallel(tasks...) + assert.False(t, ok, "ok should be false since we panic'd in task #2.") + + // Verify task #0, #1, #2. + checkResult(t, taskResultSet, 0, 0, nil, nil) + checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) + checkResult(t, taskResultSet, 2, nil, nil, 2) +} + +// Wait for result +func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val interface{}, err error, pnk interface{}) { + taskResult, ok := taskResultSet.LatestResult(index) + taskName := fmt.Sprintf("Task #%v", index) + assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) + assert.Equal(t, val, taskResult.Value, taskName) + if err != nil { + assert.Equal(t, err, taskResult.Error, taskName) + } else if pnk != nil { + assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName) + } else { + assert.Nil(t, taskResult.Error, taskName) + } +} + +// Wait for timeout (no result) +func waitTimeout(t *testing.T, taskResultCh TaskResultCh, taskName string) { + select { + case _, ok := <-taskResultCh: + if !ok { + assert.Fail(t, "TaskResultCh unexpectedly closed (%v)", taskName) + } else { + assert.Fail(t, "TaskResultCh unexpectedly returned for %v", taskName) + } + case <-time.After(1 * time.Second): + // Good! 
+ } +} diff --git a/chains/tendermint_34/libs/common/bit_array.go b/chains/tendermint_34/libs/common/bit_array.go new file mode 100755 index 0000000..8db46b9 --- /dev/null +++ b/chains/tendermint_34/libs/common/bit_array.go @@ -0,0 +1,416 @@ +package common + +import ( + "encoding/binary" + "fmt" + "regexp" + "strings" + "sync" +) + +// BitArray is a thread-safe implementation of a bit array. +type BitArray struct { + mtx sync.Mutex + Bits int `json:"bits"` // NOTE: persisted via reflect, must be exported + Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported +} + +// NewBitArray returns a new bit array. +// It returns nil if the number of bits is zero. +func NewBitArray(bits int) *BitArray { + if bits <= 0 { + return nil + } + return &BitArray{ + Bits: bits, + Elems: make([]uint64, (bits+63)/64), + } +} + +// Size returns the number of bits in the bitarray +func (bA *BitArray) Size() int { + if bA == nil { + return 0 + } + return bA.Bits +} + +// GetIndex returns the bit at index i within the bit array. +// The behavior is undefined if i >= bA.Bits +func (bA *BitArray) GetIndex(i int) bool { + if bA == nil { + return false + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.getIndex(i) +} + +func (bA *BitArray) getIndex(i int) bool { + if i >= bA.Bits { + return false + } + return bA.Elems[i/64]&(uint64(1)< 0 +} + +// SetIndex sets the bit at index i within the bit array. +// The behavior is undefined if i >= bA.Bits +func (bA *BitArray) SetIndex(i int, v bool) bool { + if bA == nil { + return false + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.setIndex(i, v) +} + +func (bA *BitArray) setIndex(i int, v bool) bool { + if i >= bA.Bits { + return false + } + if v { + bA.Elems[i/64] |= (uint64(1) << uint(i%64)) + } else { + bA.Elems[i/64] &= ^(uint64(1) << uint(i%64)) + } + return true +} + +// Copy returns a copy of the provided bit array. 
+func (bA *BitArray) Copy() *BitArray { + if bA == nil { + return nil + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.copy() +} + +func (bA *BitArray) copy() *BitArray { + c := make([]uint64, len(bA.Elems)) + copy(c, bA.Elems) + return &BitArray{ + Bits: bA.Bits, + Elems: c, + } +} + +func (bA *BitArray) copyBits(bits int) *BitArray { + c := make([]uint64, (bits+63)/64) + copy(c, bA.Elems) + return &BitArray{ + Bits: bits, + Elems: c, + } +} + +// Or returns a bit array resulting from a bitwise OR of the two bit arrays. +// If the two bit-arrys have different lengths, Or right-pads the smaller of the two bit-arrays with zeroes. +// Thus the size of the return value is the maximum of the two provided bit arrays. +func (bA *BitArray) Or(o *BitArray) *BitArray { + if bA == nil && o == nil { + return nil + } + if bA == nil && o != nil { + return o.Copy() + } + if o == nil { + return bA.Copy() + } + bA.mtx.Lock() + o.mtx.Lock() + c := bA.copyBits(MaxInt(bA.Bits, o.Bits)) + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { + c.Elems[i] |= o.Elems[i] + } + bA.mtx.Unlock() + o.mtx.Unlock() + return c +} + +// And returns a bit array resulting from a bitwise AND of the two bit arrays. +// If the two bit-arrys have different lengths, this truncates the larger of the two bit-arrays from the right. +// Thus the size of the return value is the minimum of the two provided bit arrays. +func (bA *BitArray) And(o *BitArray) *BitArray { + if bA == nil || o == nil { + return nil + } + bA.mtx.Lock() + o.mtx.Lock() + defer func() { + bA.mtx.Unlock() + o.mtx.Unlock() + }() + return bA.and(o) +} + +func (bA *BitArray) and(o *BitArray) *BitArray { + c := bA.copyBits(MinInt(bA.Bits, o.Bits)) + for i := 0; i < len(c.Elems); i++ { + c.Elems[i] &= o.Elems[i] + } + return c +} + +// Not returns a bit array resulting from a bitwise Not of the provided bit array. 
+func (bA *BitArray) Not() *BitArray { + if bA == nil { + return nil // Degenerate + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.not() +} + +func (bA *BitArray) not() *BitArray { + c := bA.copy() + for i := 0; i < len(c.Elems); i++ { + c.Elems[i] = ^c.Elems[i] + } + return c +} + +// Sub subtracts the two bit-arrays bitwise, without carrying the bits. +// Note that carryless subtraction of a - b is (a and not b). +// The output is the same as bA, regardless of o's size. +// If bA is longer than o, o is right padded with zeroes +func (bA *BitArray) Sub(o *BitArray) *BitArray { + if bA == nil || o == nil { + return nil + } + bA.mtx.Lock() + o.mtx.Lock() + // output is the same size as bA + c := bA.copyBits(bA.Bits) + // Only iterate to the minimum size between the two. + // If o is longer, those bits are ignored. + // If bA is longer, then skipping those iterations is equivalent + // to right padding with 0's + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { + // &^ is and not in golang + c.Elems[i] &^= o.Elems[i] + } + bA.mtx.Unlock() + o.mtx.Unlock() + return c +} + +// IsEmpty returns true iff all bits in the bit array are 0 +func (bA *BitArray) IsEmpty() bool { + if bA == nil { + return true // should this be opposite? + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + for _, e := range bA.Elems { + if e > 0 { + return false + } + } + return true +} + +// IsFull returns true iff all bits in the bit array are 1. 
+func (bA *BitArray) IsFull() bool { + if bA == nil { + return true + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + + // Check all elements except the last + for _, elem := range bA.Elems[:len(bA.Elems)-1] { + if (^elem) != 0 { + return false + } + } + + // Check that the last element has (lastElemBits) 1's + lastElemBits := (bA.Bits+63)%64 + 1 + lastElem := bA.Elems[len(bA.Elems)-1] + return (lastElem+1)&((uint64(1)< 0 { + trueIndices = append(trueIndices, curBit) + } + curBit++ + } + } + // handle last element + lastElem := bA.Elems[numElems-1] + numFinalBits := bA.Bits - curBit + for i := 0; i < numFinalBits; i++ { + if (lastElem & (uint64(1) << uint64(i))) > 0 { + trueIndices = append(trueIndices, curBit) + } + curBit++ + } + return trueIndices +} + +// String returns a string representation of BitArray: BA{}, +// where is a sequence of 'x' (1) and '_' (0). +// The includes spaces and newlines to help people. +// For a simple sequence of 'x' and '_' characters with no spaces or newlines, +// see the MarshalJSON() method. +// Example: "BA{_x_}" or "nil-BitArray" for nil. +func (bA *BitArray) String() string { + return bA.StringIndented("") +} + +// StringIndented returns the same thing as String(), but applies the indent +// at every 10th bit, and twice at every 50th bit. 
+func (bA *BitArray) StringIndented(indent string) string { + if bA == nil { + return "nil-BitArray" + } + bA.mtx.Lock() + defer bA.mtx.Unlock() + return bA.stringIndented(indent) +} + +func (bA *BitArray) stringIndented(indent string) string { + lines := []string{} + bits := "" + for i := 0; i < bA.Bits; i++ { + if bA.getIndex(i) { + bits += "x" + } else { + bits += "_" + } + if i%100 == 99 { + lines = append(lines, bits) + bits = "" + } + if i%10 == 9 { + bits += indent + } + if i%50 == 49 { + bits += indent + } + } + if len(bits) > 0 { + lines = append(lines, bits) + } + return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent)) +} + +// Bytes returns the byte representation of the bits within the bitarray. +func (bA *BitArray) Bytes() []byte { + bA.mtx.Lock() + defer bA.mtx.Unlock() + + numBytes := (bA.Bits + 7) / 8 + bytes := make([]byte, numBytes) + for i := 0; i < len(bA.Elems); i++ { + elemBytes := [8]byte{} + binary.LittleEndian.PutUint64(elemBytes[:], bA.Elems[i]) + copy(bytes[i*8:], elemBytes[:]) + } + return bytes +} + +// Update sets the bA's bits to be that of the other bit array. +// The copying begins from the begin of both bit arrays. +func (bA *BitArray) Update(o *BitArray) { + if bA == nil || o == nil { + return + } + bA.mtx.Lock() + o.mtx.Lock() + defer func() { + bA.mtx.Unlock() + o.mtx.Unlock() + }() + + copy(bA.Elems, o.Elems) +} + +// MarshalJSON implements json.Marshaler interface by marshaling bit array +// using a custom format: a string of '-' or 'x' where 'x' denotes the 1 bit. 
+func (bA *BitArray) MarshalJSON() ([]byte, error) { + if bA == nil { + return []byte("null"), nil + } + + bA.mtx.Lock() + defer bA.mtx.Unlock() + + bits := `"` + for i := 0; i < bA.Bits; i++ { + if bA.getIndex(i) { + bits += `x` + } else { + bits += `_` + } + } + bits += `"` + return []byte(bits), nil +} + +var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`) + +// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom +// JSON description. +func (bA *BitArray) UnmarshalJSON(bz []byte) error { + b := string(bz) + if b == "null" { + // This is required e.g. for encoding/json when decoding + // into a pointer with pre-allocated BitArray. + bA.Bits = 0 + bA.Elems = nil + return nil + } + + // Validate 'b'. + match := bitArrayJSONRegexp.FindStringSubmatch(b) + if match == nil { + return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) + } + bits := match[1] + + // Construct new BitArray and copy over. + numBits := len(bits) + bA2 := NewBitArray(numBits) + for i := 0; i < numBits; i++ { + if bits[i] == 'x' { + bA2.SetIndex(i, true) + } + } + *bA = *bA2 + return nil +} diff --git a/chains/tendermint_34/libs/common/bit_array_test.go b/chains/tendermint_34/libs/common/bit_array_test.go new file mode 100755 index 0000000..09ec8af --- /dev/null +++ b/chains/tendermint_34/libs/common/bit_array_test.go @@ -0,0 +1,256 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func randBitArray(bits int) (*BitArray, []byte) { + src := RandBytes((bits + 7) / 8) + bA := NewBitArray(bits) + for i := 0; i < len(src); i++ { + for j := 0; j < 8; j++ { + if i*8+j >= bits { + return bA, src + } + setBit := src[i]&(1< 0 + bA.SetIndex(i*8+j, setBit) + } + } + return bA, src +} + +func TestAnd(t *testing.T) { + + bA1, _ := randBitArray(51) + bA2, _ := randBitArray(31) + bA3 := bA1.And(bA2) + + var 
bNil *BitArray + require.Equal(t, bNil.And(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.And(nil), (*BitArray)(nil)) + require.Equal(t, bNil.And(nil), (*BitArray)(nil)) + + if bA3.Bits != 31 { + t.Error("Expected min bits", bA3.Bits) + } + if len(bA3.Elems) != len(bA2.Elems) { + t.Error("Expected min elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) && bA2.GetIndex(i) + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) + } + } +} + +func TestOr(t *testing.T) { + + bA1, _ := randBitArray(51) + bA2, _ := randBitArray(31) + bA3 := bA1.Or(bA2) + + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Or(bA1), bA1) + require.Equal(t, bA1.Or(nil), bA1) + require.Equal(t, bNil.Or(nil), (*BitArray)(nil)) + + if bA3.Bits != 51 { + t.Error("Expected max bits") + } + if len(bA3.Elems) != len(bA1.Elems) { + t.Error("Expected max elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) || bA2.GetIndex(i) + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) + } + } +} + +func TestSub(t *testing.T) { + testCases := []struct { + initBA string + subtractingBA string + expectedBA string + }{ + {`null`, `null`, `null`}, + {`"x"`, `null`, `null`}, + {`null`, `"x"`, `null`}, + {`"x"`, `"x"`, `"_"`}, + {`"xxxxxx"`, `"x_x_x_"`, `"_x_x_x"`}, + {`"x_x_x_"`, `"xxxxxx"`, `"______"`}, + {`"xxxxxx"`, `"x_x_x_xxxx"`, `"_x_x_x"`}, + {`"x_x_x_xxxx"`, `"xxxxxx"`, `"______xxxx"`}, + {`"xxxxxxxxxx"`, `"x_x_x_"`, `"_x_x_xxxxx"`}, + {`"x_x_x_"`, `"xxxxxxxxxx"`, `"______"`}, + } + for _, tc := range testCases { + var bA *BitArray + err := json.Unmarshal([]byte(tc.initBA), &bA) + require.Nil(t, err) + + var o *BitArray + err = json.Unmarshal([]byte(tc.subtractingBA), &o) + require.Nil(t, err) + + got, _ := json.Marshal(bA.Sub(o)) + require.Equal(t, tc.expectedBA, string(got), "%s minus %s doesn't equal %s", 
tc.initBA, tc.subtractingBA, tc.expectedBA) + } +} + +func TestPickRandom(t *testing.T) { + empty16Bits := "________________" + empty64Bits := empty16Bits + empty16Bits + empty16Bits + empty16Bits + testCases := []struct { + bA string + ok bool + }{ + {`null`, false}, + {`"x"`, true}, + {`"` + empty16Bits + `"`, false}, + {`"x` + empty16Bits + `"`, true}, + {`"` + empty16Bits + `x"`, true}, + {`"x` + empty16Bits + `x"`, true}, + {`"` + empty64Bits + `"`, false}, + {`"x` + empty64Bits + `"`, true}, + {`"` + empty64Bits + `x"`, true}, + {`"x` + empty64Bits + `x"`, true}, + } + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(tc.bA), &bitArr) + require.NoError(t, err) + _, ok := bitArr.PickRandom() + require.Equal(t, tc.ok, ok, "PickRandom got an unexpected result on input %s", tc.bA) + } +} + +func TestBytes(t *testing.T) { + bA := NewBitArray(4) + bA.SetIndex(0, true) + check := func(bA *BitArray, bz []byte) { + if !bytes.Equal(bA.Bytes(), bz) { + panic(fmt.Sprintf("Expected %X but got %X", bz, bA.Bytes())) + } + } + check(bA, []byte{0x01}) + bA.SetIndex(3, true) + check(bA, []byte{0x09}) + + bA = NewBitArray(9) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + + bA = NewBitArray(16) + check(bA, []byte{0x00, 0x00}) + bA.SetIndex(7, true) + check(bA, []byte{0x80, 0x00}) + bA.SetIndex(8, true) + check(bA, []byte{0x80, 0x01}) + bA.SetIndex(9, true) + check(bA, []byte{0x80, 0x03}) +} + +func TestEmptyFull(t *testing.T) { + ns := []int{47, 123} + for _, n := range ns { + bA := NewBitArray(n) + if !bA.IsEmpty() { + t.Fatal("Expected bit array to be empty") + } + for i := 0; i < n; i++ { + bA.SetIndex(i, true) + } + if !bA.IsFull() { + t.Fatal("Expected bit array to be full") + } + } +} + +func TestUpdateNeverPanics(t *testing.T) { + newRandBitArray := func(n int) *BitArray { + ba, _ := randBitArray(n) + return ba + } + pairs := []struct { + a, 
b *BitArray + }{ + {nil, nil}, + {newRandBitArray(10), newRandBitArray(12)}, + {newRandBitArray(23), newRandBitArray(23)}, + {newRandBitArray(37), nil}, + {nil, NewBitArray(10)}, + } + + for _, pair := range pairs { + a, b := pair.a, pair.b + a.Update(b) + b.Update(a) + } +} + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1 << 31} + for _, bits := range bitList { + _ = NewBitArray(bits) + } +} + +func TestJSONMarshalUnmarshal(t *testing.T) { + + bA1 := NewBitArray(0) + + bA2 := NewBitArray(1) + + bA3 := NewBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + testCases := []struct { + bA *BitArray + marshalledBA string + }{ + {nil, `null`}, + {bA1, `null`}, + {bA2, `"_"`}, + {bA3, `"x"`}, + {bA4, `"xx___"`}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz, err := json.Marshal(tc.bA) + require.NoError(t, err) + + assert.Equal(t, tc.marshalledBA, string(bz)) + + var unmarshalledBA *BitArray + err = json.Unmarshal(bz, &unmarshalledBA) + require.NoError(t, err) + + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Bits, unmarshalledBA.Bits) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} diff --git a/chains/tendermint_34/libs/common/bytes.go b/chains/tendermint_34/libs/common/bytes.go new file mode 100755 index 0000000..711720a --- /dev/null +++ b/chains/tendermint_34/libs/common/bytes.go @@ -0,0 +1,62 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. 
+type HexBytes []byte + +// Marshal needed for protobuf compatibility +func (bz HexBytes) Marshal() ([]byte, error) { + return bz, nil +} + +// Unmarshal needed for protobuf compatibility +func (bz *HexBytes) Unmarshal(data []byte) error { + *bz = data + return nil +} + +// This is the point of Bytes. +func (bz HexBytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(bz)) + jbz := make([]byte, len(s)+2) + jbz[0] = '"' + copy(jbz[1:], []byte(s)) + jbz[len(jbz)-1] = '"' + return jbz, nil +} + +// This is the point of Bytes. +func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *bz = bz2 + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} + +func (bz HexBytes) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", bz))) + default: + s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + } +} diff --git a/chains/tendermint_34/libs/common/bytes_test.go b/chains/tendermint_34/libs/common/bytes_test.go new file mode 100755 index 0000000..af1460c --- /dev/null +++ b/chains/tendermint_34/libs/common/bytes_test.go @@ -0,0 +1,63 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. 
+func TestJSONMarshal(t *testing.T) { + + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} diff --git a/chains/tendermint_34/libs/common/byteslice.go b/chains/tendermint_34/libs/common/byteslice.go new file mode 100755 index 0000000..af2d794 --- /dev/null +++ b/chains/tendermint_34/libs/common/byteslice.go @@ -0,0 +1,10 @@ +package common + +// Fingerprint returns the first 6 bytes of a byte slice. +// If the slice is less than 6 bytes, the fingerprint +// contains trailing zeroes. 
+func Fingerprint(slice []byte) []byte { + fingerprint := make([]byte, 6) + copy(fingerprint, slice) + return fingerprint +} diff --git a/chains/tendermint_34/libs/common/cmap.go b/chains/tendermint_34/libs/common/cmap.go new file mode 100755 index 0000000..2f7720d --- /dev/null +++ b/chains/tendermint_34/libs/common/cmap.go @@ -0,0 +1,75 @@ +package common + +import "sync" + +// CMap is a goroutine-safe map +type CMap struct { + m map[string]interface{} + l sync.Mutex +} + +func NewCMap() *CMap { + return &CMap{ + m: make(map[string]interface{}), + } +} + +func (cm *CMap) Set(key string, value interface{}) { + cm.l.Lock() + cm.m[key] = value + cm.l.Unlock() +} + +func (cm *CMap) Get(key string) interface{} { + cm.l.Lock() + val := cm.m[key] + cm.l.Unlock() + return val +} + +func (cm *CMap) Has(key string) bool { + cm.l.Lock() + _, ok := cm.m[key] + cm.l.Unlock() + return ok +} + +func (cm *CMap) Delete(key string) { + cm.l.Lock() + delete(cm.m, key) + cm.l.Unlock() +} + +func (cm *CMap) Size() int { + cm.l.Lock() + size := len(cm.m) + cm.l.Unlock() + return size +} + +func (cm *CMap) Clear() { + cm.l.Lock() + cm.m = make(map[string]interface{}) + cm.l.Unlock() +} + +func (cm *CMap) Keys() []string { + cm.l.Lock() + + keys := []string{} + for k := range cm.m { + keys = append(keys, k) + } + cm.l.Unlock() + return keys +} + +func (cm *CMap) Values() []interface{} { + cm.l.Lock() + items := []interface{}{} + for _, v := range cm.m { + items = append(items, v) + } + cm.l.Unlock() + return items +} diff --git a/chains/tendermint_34/libs/common/cmap_test.go b/chains/tendermint_34/libs/common/cmap_test.go new file mode 100755 index 0000000..33d9f04 --- /dev/null +++ b/chains/tendermint_34/libs/common/cmap_test.go @@ -0,0 +1,64 @@ +package common + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIterateKeysWithValues(t *testing.T) { + cmap := NewCMap() + + for i := 1; i <= 10; i++ { + cmap.Set(fmt.Sprintf("key%d", i), 
fmt.Sprintf("value%d", i)) + } + + // Testing size + assert.Equal(t, 10, cmap.Size()) + assert.Equal(t, 10, len(cmap.Keys())) + assert.Equal(t, 10, len(cmap.Values())) + + // Iterating Keys, checking for matching Value + for _, key := range cmap.Keys() { + val := strings.Replace(key, "key", "value", -1) + assert.Equal(t, val, cmap.Get(key)) + } + + // Test if all keys are within []Keys() + keys := cmap.Keys() + for i := 1; i <= 10; i++ { + assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") + } + + // Delete 1 Key + cmap.Delete("key1") + + assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed") +} + +func TestContains(t *testing.T) { + cmap := NewCMap() + + cmap.Set("key1", "value1") + + // Test for known values + assert.True(t, cmap.Has("key1")) + assert.Equal(t, "value1", cmap.Get("key1")) + + // Test for unknown values + assert.False(t, cmap.Has("key2")) + assert.Nil(t, cmap.Get("key2")) +} + +func BenchmarkCMapHas(b *testing.B) { + m := NewCMap() + for i := 0; i < 1000; i++ { + m.Set(string(i), i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Has(string(i)) + } +} diff --git a/chains/tendermint_34/libs/common/colors.go b/chains/tendermint_34/libs/common/colors.go new file mode 100755 index 0000000..4837f97 --- /dev/null +++ b/chains/tendermint_34/libs/common/colors.go @@ -0,0 +1,95 @@ +package common + +import ( + "fmt" + "strings" +) + +const ( + ANSIReset = "\x1b[0m" + ANSIBright = "\x1b[1m" + ANSIDim = "\x1b[2m" + ANSIUnderscore = "\x1b[4m" + ANSIBlink = "\x1b[5m" + ANSIReverse = "\x1b[7m" + ANSIHidden = "\x1b[8m" + + ANSIFgBlack = "\x1b[30m" + ANSIFgRed = "\x1b[31m" + ANSIFgGreen = "\x1b[32m" + ANSIFgYellow = "\x1b[33m" + ANSIFgBlue = "\x1b[34m" + ANSIFgMagenta = "\x1b[35m" + ANSIFgCyan = "\x1b[36m" + ANSIFgWhite = "\x1b[37m" + + ANSIBgBlack = "\x1b[40m" + ANSIBgRed = "\x1b[41m" + ANSIBgGreen = "\x1b[42m" + ANSIBgYellow = "\x1b[43m" + ANSIBgBlue 
= "\x1b[44m" + ANSIBgMagenta = "\x1b[45m" + ANSIBgCyan = "\x1b[46m" + ANSIBgWhite = "\x1b[47m" +) + +// color the string s with color 'color' +// unless s is already colored +func treat(s string, color string) string { + if len(s) > 2 && s[:2] == "\x1b[" { + return s + } + return color + s + ANSIReset +} + +func treatAll(color string, args ...interface{}) string { + var parts []string + for _, arg := range args { + parts = append(parts, treat(fmt.Sprintf("%v", arg), color)) + } + return strings.Join(parts, "") +} + +func Black(args ...interface{}) string { + return treatAll(ANSIFgBlack, args...) +} + +func Red(args ...interface{}) string { + return treatAll(ANSIFgRed, args...) +} + +func Green(args ...interface{}) string { + return treatAll(ANSIFgGreen, args...) +} + +func Yellow(args ...interface{}) string { + return treatAll(ANSIFgYellow, args...) +} + +func Blue(args ...interface{}) string { + return treatAll(ANSIFgBlue, args...) +} + +func Magenta(args ...interface{}) string { + return treatAll(ANSIFgMagenta, args...) +} + +func Cyan(args ...interface{}) string { + return treatAll(ANSIFgCyan, args...) +} + +func White(args ...interface{}) string { + return treatAll(ANSIFgWhite, args...) 
+} + +func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string { + s := "" + for _, b := range data { + if 0x21 <= b && b < 0x7F { + s += textColor(string(b)) + } else { + s += bytesColor(fmt.Sprintf("%02X", b)) + } + } + return s +} diff --git a/chains/tendermint_34/libs/common/date.go b/chains/tendermint_34/libs/common/date.go new file mode 100755 index 0000000..e017a4b --- /dev/null +++ b/chains/tendermint_34/libs/common/date.go @@ -0,0 +1,43 @@ +package common + +import ( + "strings" + "time" + + "github.com/pkg/errors" +) + +// TimeLayout helps to parse a date string of the format YYYY-MM-DD +// Intended to be used with the following function: +// time.Parse(TimeLayout, date) +var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD + +// ParseDateRange parses a date range string of the format start:end +// where the start and end date are of the format YYYY-MM-DD. +// The parsed dates are time.Time and will return the zero time for +// unbounded dates, ex: +// unbounded start: :2000-12-31 +// unbounded end: 2000-12-31: +func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) { + dates := strings.Split(dateRange, ":") + if len(dates) != 2 { + err = errors.New("bad date range, must be in format date:date") + return + } + parseDate := func(date string) (out time.Time, err error) { + if len(date) == 0 { + return + } + out, err = time.Parse(TimeLayout, date) + return + } + startDate, err = parseDate(dates[0]) + if err != nil { + return + } + endDate, err = parseDate(dates[1]) + if err != nil { + return + } + return +} diff --git a/chains/tendermint_34/libs/common/date_test.go b/chains/tendermint_34/libs/common/date_test.go new file mode 100755 index 0000000..2c06324 --- /dev/null +++ b/chains/tendermint_34/libs/common/date_test.go @@ -0,0 +1,46 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, 
time.UTC) + date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) + zero time.Time +) + +func TestParseDateRange(t *testing.T) { + assert := assert.New(t) + + var testDates = []struct { + dateStr string + start time.Time + end time.Time + errNil bool + }{ + {"2015-12-31:2016-12-31", date, date2, true}, + {"2015-12-31:", date, zero, true}, + {":2016-12-31", zero, date2, true}, + {"2016-12-31", zero, zero, false}, + {"2016-31-12:", zero, zero, false}, + {":2016-31-12", zero, zero, false}, + } + + for _, test := range testDates { + start, end, err := ParseDateRange(test.dateStr) + if test.errNil { + assert.Nil(err) + testPtr := func(want, have time.Time) { + assert.True(have.Equal(want)) + } + testPtr(test.start, start) + testPtr(test.end, end) + } else { + assert.NotNil(err) + } + } +} diff --git a/chains/tendermint_34/libs/common/errors.go b/chains/tendermint_34/libs/common/errors.go new file mode 100755 index 0000000..10e40eb --- /dev/null +++ b/chains/tendermint_34/libs/common/errors.go @@ -0,0 +1,246 @@ +package common + +import ( + "fmt" + "runtime" +) + +//---------------------------------------- +// Convenience method. + +func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { + if causeCmnError, ok := cause.(*cmnError); ok { + msg := fmt.Sprintf(format, args...) + return causeCmnError.Stacktrace().Trace(1, msg) + } else if cause == nil { + return newCmnError(FmtError{format, args}).Stacktrace() + } else { + // NOTE: causeCmnError is a typed nil here. + msg := fmt.Sprintf(format, args...) + return newCmnError(cause).Stacktrace().Trace(1, msg) + } +} + +//---------------------------------------- +// Error & cmnError + +/* + +Usage with arbitrary error data: + +```go + // Error construction + type MyError struct{} + var err1 error = NewErrorWithData(MyError{}, "my message") + ... + // Wrapping + var err2 error = ErrorWrap(err1, "another message") + if (err1 != err2) { panic("should be the same") + ... 
+ // Error handling + switch err2.Data().(type){ + case MyError: ... + default: ... + } +``` +*/ +type Error interface { + Error() string + Stacktrace() Error + Trace(offset int, format string, args ...interface{}) Error + Data() interface{} +} + +// New Error with formatted message. +// The Error's Data will be a FmtError type. +func NewError(format string, args ...interface{}) Error { + err := FmtError{format, args} + return newCmnError(err) +} + +// New Error with specified data. +func NewErrorWithData(data interface{}) Error { + return newCmnError(data) +} + +type cmnError struct { + data interface{} // associated data + msgtraces []msgtraceItem // all messages traced + stacktrace []uintptr // first stack trace +} + +var _ Error = &cmnError{} + +// NOTE: do not expose. +func newCmnError(data interface{}) *cmnError { + return &cmnError{ + data: data, + msgtraces: nil, + stacktrace: nil, + } +} + +// Implements error. +func (err *cmnError) Error() string { + return fmt.Sprintf("%v", err) +} + +// Captures a stacktrace if one was not already captured. +func (err *cmnError) Stacktrace() Error { + if err.stacktrace == nil { + var offset = 3 + var depth = 32 + err.stacktrace = captureStacktrace(offset, depth) + } + return err +} + +// Add tracing information with msg. +// Set n=0 unless wrapped with some function, then n > 0. +func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error { + msg := fmt.Sprintf(format, args...) + return err.doTrace(msg, offset) +} + +// Return the "data" of this error. +// Data could be used for error handling/switching, +// or for holding general error/debug information. +func (err *cmnError) Data() interface{} { + return err.data +} + +func (err *cmnError) doTrace(msg string, n int) Error { + pc, _, _, _ := runtime.Caller(n + 2) // +1 for doTrace(). +1 for the caller. + // Include file & line number & msg. + // Do not include the whole stack trace. 
+ err.msgtraces = append(err.msgtraces, msgtraceItem{ + pc: pc, + msg: msg, + }) + return err +} + +func (err *cmnError) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", &err))) + default: + if s.Flag('#') { + s.Write([]byte("--= Error =--\n")) + // Write data. + s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data))) + // Write msg trace items. + s.Write([]byte(fmt.Sprintf("Msg Traces:\n"))) + for i, msgtrace := range err.msgtraces { + s.Write([]byte(fmt.Sprintf(" %4d %s\n", i, msgtrace.String()))) + } + // Write stack trace. + if err.stacktrace != nil { + s.Write([]byte(fmt.Sprintf("Stack Trace:\n"))) + for i, pc := range err.stacktrace { + fnc := runtime.FuncForPC(pc) + file, line := fnc.FileLine(pc) + s.Write([]byte(fmt.Sprintf(" %4d %s:%d\n", i, file, line))) + } + } + s.Write([]byte("--= /Error =--\n")) + } else { + // Write msg. + s.Write([]byte(fmt.Sprintf("%v", err.data))) + } + } +} + +//---------------------------------------- +// stacktrace & msgtraceItem + +func captureStacktrace(offset int, depth int) []uintptr { + var pcs = make([]uintptr, depth) + n := runtime.Callers(offset, pcs) + return pcs[0:n] +} + +type msgtraceItem struct { + pc uintptr + msg string +} + +func (mti msgtraceItem) String() string { + fnc := runtime.FuncForPC(mti.pc) + file, line := fnc.FileLine(mti.pc) + return fmt.Sprintf("%s:%d - %s", + file, line, + mti.msg, + ) +} + +//---------------------------------------- +// fmt error + +/* + +FmtError is the data type for NewError() (e.g. NewError().Data().(FmtError)) +Theoretically it could be used to switch on the format string. + +```go + // Error construction + var err1 error = NewError("invalid username %v", "BOB") + var err2 error = NewError("another kind of error") + ... + // Error handling + switch err1.Data().(cmn.FmtError).Format() { + case "invalid username %v": ... + case "another kind of error": ... + default: ... 
+ } +``` +*/ +type FmtError struct { + format string + args []interface{} +} + +func (fe FmtError) Error() string { + return fmt.Sprintf(fe.format, fe.args...) +} + +func (fe FmtError) String() string { + return fmt.Sprintf("FmtError{format:%v,args:%v}", + fe.format, fe.args) +} + +func (fe FmtError) Format() string { + return fe.format +} + +//---------------------------------------- +// Panic wrappers +// XXX DEPRECATED + +// A panic resulting from a sanity check means there is a programmer error +// and some guarantee is not satisfied. +// XXX DEPRECATED +func PanicSanity(v interface{}) { + panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v)) +} + +// A panic here means something has gone horribly wrong, in the form of data corruption or +// failure of the operating system. In a correct/healthy system, these should never fire. +// If they do, it's indicative of a much more serious problem. +// XXX DEPRECATED +func PanicCrisis(v interface{}) { + panic(fmt.Sprintf("Panicked on a Crisis: %v", v)) +} + +// Indicates a failure of consensus. Someone was malicious or something has +// gone horribly wrong. 
These should really boot us into an "emergency-recover" mode +// XXX DEPRECATED +func PanicConsensus(v interface{}) { + panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v)) +} + +// For those times when we're not sure if we should panic +// XXX DEPRECATED +func PanicQ(v interface{}) { + panic(fmt.Sprintf("Panicked questionably: %v", v)) +} diff --git a/chains/tendermint_34/libs/common/errors_test.go b/chains/tendermint_34/libs/common/errors_test.go new file mode 100755 index 0000000..b85936d --- /dev/null +++ b/chains/tendermint_34/libs/common/errors_test.go @@ -0,0 +1,101 @@ +package common + +import ( + fmt "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorPanic(t *testing.T) { + type pnk struct { + msg string + } + + capturePanic := func() (err Error) { + defer func() { + if r := recover(); r != nil { + err = ErrorWrap(r, "This is the message in ErrorWrap(r, message).") + } + }() + panic(pnk{"something"}) + } + + var err = capturePanic() + + assert.Equal(t, pnk{"something"}, err.Data()) + assert.Equal(t, "{something}", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapSomething(t *testing.T) { + + var err = ErrorWrap("something", "formatter%v%v", 0, 1) + + assert.Equal(t, "something", err.Data()) + assert.Equal(t, "something", fmt.Sprintf("%v", err)) + assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorWrapNothing(t *testing.T) { + + var err = ErrorWrap(nil, "formatter%v%v", 0, 1) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.Contains(t, fmt.Sprintf("%#v", 
err), "Stack Trace:\n 0") +} + +func TestErrorNewError(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") +} + +func TestErrorNewErrorWithStacktrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1).Stacktrace() + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") +} + +func TestErrorNewErrorWithTrace(t *testing.T) { + + var err = NewError("formatter%v%v", 0, 1) + err.Trace(0, "trace %v", 1) + err.Trace(0, "trace %v", 2) + err.Trace(0, "trace %v", 3) + + assert.Equal(t, + FmtError{"formatter%v%v", []interface{}{0, 1}}, + err.Data()) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) + assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) + dump := fmt.Sprintf("%#v", err) + assert.NotContains(t, dump, "Stack Trace") + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 1`, dump) + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 2`, dump) + assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 3`, dump) +} + +func TestErrorWrapError(t *testing.T) { + var err1 error = NewError("my message") + var err2 error = ErrorWrap(err1, "another message") + assert.Equal(t, err1, err2) +} diff --git a/chains/tendermint_34/libs/common/heap.go b/chains/tendermint_34/libs/common/heap.go new file mode 100755 index 0000000..b3bcb9d --- /dev/null +++ 
b/chains/tendermint_34/libs/common/heap.go @@ -0,0 +1,125 @@ +package common + +import ( + "bytes" + "container/heap" +) + +/* + Example usage: + + ``` + h := NewHeap() + + h.Push("msg1", 1) + h.Push("msg3", 3) + h.Push("msg2", 2) + + fmt.Println(h.Pop()) // msg1 + fmt.Println(h.Pop()) // msg2 + fmt.Println(h.Pop()) // msg3 + ``` +*/ +type Heap struct { + pq priorityQueue +} + +func NewHeap() *Heap { + return &Heap{pq: make([]*pqItem, 0)} +} + +func (h *Heap) Len() int64 { + return int64(len(h.pq)) +} + +func (h *Heap) Push(value interface{}, priority int) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) +} + +func (h *Heap) PushBytes(value interface{}, priority []byte) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) +} + +func (h *Heap) PushComparable(value interface{}, priority Comparable) { + heap.Push(&h.pq, &pqItem{value: value, priority: priority}) +} + +func (h *Heap) Peek() interface{} { + if len(h.pq) == 0 { + return nil + } + return h.pq[0].value +} + +func (h *Heap) Update(value interface{}, priority Comparable) { + h.pq.Update(h.pq[0], value, priority) +} + +func (h *Heap) Pop() interface{} { + item := heap.Pop(&h.pq).(*pqItem) + return item.value +} + +//----------------------------------------------------------------------------- +// From: http://golang.org/pkg/container/heap/#example__priorityQueue + +type pqItem struct { + value interface{} + priority Comparable + index int +} + +type priorityQueue []*pqItem + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + return pq[i].priority.Less(pq[j].priority) +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *priorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*pqItem) + item.index = n + *pq = append(*pq, item) +} + +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + 
item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { + item.value = value + item.priority = priority + heap.Fix(pq, item.index) +} + +//-------------------------------------------------------------------------------- +// Comparable + +type Comparable interface { + Less(o interface{}) bool +} + +type cmpInt int + +func (i cmpInt) Less(o interface{}) bool { + return int(i) < int(o.(cmpInt)) +} + +type cmpBytes []byte + +func (bz cmpBytes) Less(o interface{}) bool { + return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 +} diff --git a/chains/tendermint_34/libs/common/int.go b/chains/tendermint_34/libs/common/int.go new file mode 100755 index 0000000..845dc97 --- /dev/null +++ b/chains/tendermint_34/libs/common/int.go @@ -0,0 +1,11 @@ +package common + +// IntInSlice returns true if a is found in the list. +func IntInSlice(a int, list []int) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/chains/tendermint_34/libs/common/int_test.go b/chains/tendermint_34/libs/common/int_test.go new file mode 100755 index 0000000..1ecc784 --- /dev/null +++ b/chains/tendermint_34/libs/common/int_test.go @@ -0,0 +1,14 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIntInSlice(t *testing.T) { + assert.True(t, IntInSlice(1, []int{1, 2, 3})) + assert.False(t, IntInSlice(4, []int{1, 2, 3})) + assert.True(t, IntInSlice(0, []int{0})) + assert.False(t, IntInSlice(0, []int{})) +} diff --git a/chains/tendermint_34/libs/common/io.go b/chains/tendermint_34/libs/common/io.go new file mode 100755 index 0000000..fa0443e --- /dev/null +++ b/chains/tendermint_34/libs/common/io.go @@ -0,0 +1,74 @@ +package common + +import ( + "bytes" + "errors" + "io" +) + +type PrefixedReader struct { + Prefix []byte + reader io.Reader +} + +func NewPrefixedReader(prefix []byte, reader io.Reader) 
*PrefixedReader { + return &PrefixedReader{prefix, reader} +} + +func (pr *PrefixedReader) Read(p []byte) (n int, err error) { + if len(pr.Prefix) > 0 { + read := copy(p, pr.Prefix) + pr.Prefix = pr.Prefix[read:] + return read, nil + } + return pr.reader.Read(p) +} + +// NOTE: Not goroutine safe +type BufferCloser struct { + bytes.Buffer + Closed bool +} + +func NewBufferCloser(buf []byte) *BufferCloser { + return &BufferCloser{ + *bytes.NewBuffer(buf), + false, + } +} + +func (bc *BufferCloser) Close() error { + if bc.Closed { + return errors.New("BufferCloser already closed") + } + bc.Closed = true + return nil +} + +func (bc *BufferCloser) Write(p []byte) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.Write(p) +} + +func (bc *BufferCloser) WriteByte(c byte) error { + if bc.Closed { + return errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteByte(c) +} + +func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteRune(r) +} + +func (bc *BufferCloser) WriteString(s string) (n int, err error) { + if bc.Closed { + return 0, errors.New("Cannot write to closed BufferCloser") + } + return bc.Buffer.WriteString(s) +} diff --git a/chains/tendermint_34/libs/common/kvpair.go b/chains/tendermint_34/libs/common/kvpair.go new file mode 100755 index 0000000..54c3a58 --- /dev/null +++ b/chains/tendermint_34/libs/common/kvpair.go @@ -0,0 +1,67 @@ +package common + +import ( + "bytes" + "sort" +) + +//---------------------------------------- +// KVPair + +/* +Defined in types.proto + +type KVPair struct { + Key []byte + Value []byte +} +*/ + +type KVPairs []KVPair + +// Sorting +func (kvs KVPairs) Len() int { return len(kvs) } +func (kvs KVPairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return 
bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KVPairs) Sort() { sort.Sort(kvs) } + +//---------------------------------------- +// KI64Pair + +/* +Defined in types.proto +type KI64Pair struct { + Key []byte + Value int64 +} +*/ + +type KI64Pairs []KI64Pair + +// Sorting +func (kvs KI64Pairs) Len() int { return len(kvs) } +func (kvs KI64Pairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return kvs[i].Value < kvs[j].Value + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } diff --git a/chains/tendermint_34/libs/common/math.go b/chains/tendermint_34/libs/common/math.go new file mode 100755 index 0000000..ae91f20 --- /dev/null +++ b/chains/tendermint_34/libs/common/math.go @@ -0,0 +1,31 @@ +package common + +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/chains/tendermint_34/libs/common/net.go b/chains/tendermint_34/libs/common/net.go new file mode 100755 index 0000000..c7fff4c --- /dev/null +++ b/chains/tendermint_34/libs/common/net.go @@ -0,0 +1,43 @@ +package common + +import ( + "net" + "strings" +) + +// Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, +// eg. 
"tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" +func Connect(protoAddr string) (net.Conn, error) { + proto, address := ProtocolAndAddress(protoAddr) + conn, err := net.Dial(proto, address) + return conn, err +} + +// ProtocolAndAddress splits an address into the protocol and address components. +// For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". +// If the address has no protocol prefix, the default is "tcp". +func ProtocolAndAddress(listenAddr string) (string, string) { + protocol, address := "tcp", listenAddr + parts := strings.SplitN(address, "://", 2) + if len(parts) == 2 { + protocol, address = parts[0], parts[1] + } + return protocol, address +} + +// GetFreePort gets a free port from the operating system. +// Ripped from https://github.com/phayes/freeport. +// BSD-licensed. +func GetFreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} diff --git a/chains/tendermint_34/libs/common/net_test.go b/chains/tendermint_34/libs/common/net_test.go new file mode 100755 index 0000000..38d2ae8 --- /dev/null +++ b/chains/tendermint_34/libs/common/net_test.go @@ -0,0 +1,38 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProtocolAndAddress(t *testing.T) { + + cases := []struct { + fullAddr string + proto string + addr string + }{ + { + "tcp://mydomain:80", + "tcp", + "mydomain:80", + }, + { + "mydomain:80", + "tcp", + "mydomain:80", + }, + { + "unix://mydomain:80", + "unix", + "mydomain:80", + }, + } + + for _, c := range cases { + proto, addr := ProtocolAndAddress(c.fullAddr) + assert.Equal(t, proto, c.proto) + assert.Equal(t, addr, c.addr) + } +} diff --git a/chains/tendermint_34/libs/common/nil.go b/chains/tendermint_34/libs/common/nil.go new file mode 100755 index 
0000000..31f75f0 --- /dev/null +++ b/chains/tendermint_34/libs/common/nil.go @@ -0,0 +1,29 @@ +package common + +import "reflect" + +// Go lacks a simple and safe way to see if something is a typed nil. +// See: +// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 +// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion +// - https://github.com/golang/go/issues/21538 +func IsTypedNil(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +// Returns true if it has zero length. +func IsEmpty(o interface{}) bool { + rv := reflect.ValueOf(o) + switch rv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return rv.Len() == 0 + default: + return false + } +} diff --git a/chains/tendermint_34/libs/common/os.go b/chains/tendermint_34/libs/common/os.go new file mode 100755 index 0000000..501bb56 --- /dev/null +++ b/chains/tendermint_34/libs/common/os.go @@ -0,0 +1,140 @@ +package common + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "strings" + "syscall" +) + +var gopath string + +// GoPath returns GOPATH env variable value. If it is not set, this function +// will try to call `go env GOPATH` subcommand. +func GoPath() string { + if gopath != "" { + return gopath + } + + path := os.Getenv("GOPATH") + if len(path) == 0 { + goCmd := exec.Command("go", "env", "GOPATH") + out, err := goCmd.Output() + if err != nil { + panic(fmt.Sprintf("failed to determine gopath: %v", err)) + } + path = string(out) + } + gopath = path + return path +} + +// TrapSignal catches the SIGTERM and executes cb function. After that it exits +// with code 1. 
+func TrapSignal(cb func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + fmt.Printf("captured %v, exiting...\n", sig) + if cb != nil { + cb() + } + os.Exit(1) + } + }() + select {} +} + +// Kill the running process by sending itself SIGTERM. +func Kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + return p.Signal(syscall.SIGTERM) +} + +func Exit(s string) { + fmt.Printf(s + "\n") + os.Exit(1) +} + +func EnsureDir(dir string, mode os.FileMode) error { + if _, err := os.Stat(dir); os.IsNotExist(err) { + err := os.MkdirAll(dir, mode) + if err != nil { + return fmt.Errorf("Could not create directory %v. %v", dir, err) + } + } + return nil +} + +func IsDirEmpty(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + if os.IsNotExist(err) { + return true, err + } + // Otherwise perhaps a permission + // error or some other error. + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) // Or f.Readdir(1) + if err == io.EOF { + return true, nil + } + return false, err // Either not empty or error, suits both cases +} + +func FileExists(filePath string) bool { + _, err := os.Stat(filePath) + return !os.IsNotExist(err) +} + +func ReadFile(filePath string) ([]byte, error) { + return ioutil.ReadFile(filePath) +} + +func MustReadFile(filePath string) []byte { + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + Exit(fmt.Sprintf("MustReadFile failed: %v", err)) + return nil + } + return fileBytes +} + +func WriteFile(filePath string, contents []byte, mode os.FileMode) error { + return ioutil.WriteFile(filePath, contents, mode) +} + +func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { + err := WriteFile(filePath, contents, mode) + if err != nil { + Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) + } +} + +//-------------------------------------------------------------------------------- + 
+func Prompt(prompt string, defaultValue string) (string, error) { + fmt.Print(prompt) + reader := bufio.NewReader(os.Stdin) + line, err := reader.ReadString('\n') + if err != nil { + return defaultValue, err + } + line = strings.TrimSpace(line) + if line == "" { + return defaultValue, nil + } + return line, nil +} diff --git a/chains/tendermint_34/libs/common/os_test.go b/chains/tendermint_34/libs/common/os_test.go new file mode 100755 index 0000000..bf65f0c --- /dev/null +++ b/chains/tendermint_34/libs/common/os_test.go @@ -0,0 +1,46 @@ +package common + +import ( + "os" + "testing" +) + +func TestGoPath(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + err := os.Setenv("GOPATH", "~/testgopath") + if err != nil { + t.Fatal(err) + } + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("should get GOPATH env var value, got %v", path) + } + os.Unsetenv("GOPATH") + + path = GoPath() + if path != "~/testgopath" { + t.Fatalf("subsequent calls should return the same value, got %v", path) + } +} + +func TestGoPathWithoutEnvVar(t *testing.T) { + // restore original gopath upon exit + path := os.Getenv("GOPATH") + defer func() { + _ = os.Setenv("GOPATH", path) + }() + + os.Unsetenv("GOPATH") + // reset cache + gopath = "" + + path = GoPath() + if path == "" || path == "~/testgopath" { + t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path) + } +} diff --git a/chains/tendermint_34/libs/common/random.go b/chains/tendermint_34/libs/common/random.go new file mode 100755 index 0000000..2de6594 --- /dev/null +++ b/chains/tendermint_34/libs/common/random.go @@ -0,0 +1,306 @@ +package common + +import ( + crand "crypto/rand" + mrand "math/rand" + "sync" + "time" +) + +const ( + strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters +) + +// Rand is a prng, that is seeded with OS randomness. 
+// The OS randomness is obtained from crypto/rand, however none of the provided +// methods are suitable for cryptographic usage. +// They all utilize math/rand's prng internally. +// +// All of the methods here are suitable for concurrent use. +// This is achieved by using a mutex lock on all of the provided methods. +type Rand struct { + sync.Mutex + rand *mrand.Rand +} + +var grand *Rand + +func init() { + grand = NewRand() + grand.init() +} + +func NewRand() *Rand { + rand := &Rand{} + rand.init() + return rand +} + +func (r *Rand) init() { + bz := cRandBytes(8) + var seed uint64 + for i := 0; i < 8; i++ { + seed |= uint64(bz[i]) + seed <<= 8 + } + r.reset(int64(seed)) +} + +func (r *Rand) reset(seed int64) { + r.rand = mrand.New(mrand.NewSource(seed)) +} + +//---------------------------------------- +// Global functions + +func Seed(seed int64) { + grand.Seed(seed) +} + +func RandStr(length int) string { + return grand.Str(length) +} + +func RandUint16() uint16 { + return grand.Uint16() +} + +func RandUint32() uint32 { + return grand.Uint32() +} + +func RandUint64() uint64 { + return grand.Uint64() +} + +func RandUint() uint { + return grand.Uint() +} + +func RandInt16() int16 { + return grand.Int16() +} + +func RandInt32() int32 { + return grand.Int32() +} + +func RandInt64() int64 { + return grand.Int64() +} + +func RandInt() int { + return grand.Int() +} + +func RandInt31() int32 { + return grand.Int31() +} + +func RandInt31n(n int32) int32 { + return grand.Int31n(n) +} + +func RandInt63() int64 { + return grand.Int63() +} + +func RandInt63n(n int64) int64 { + return grand.Int63n(n) +} + +func RandBool() bool { + return grand.Bool() +} + +func RandFloat32() float32 { + return grand.Float32() +} + +func RandFloat64() float64 { + return grand.Float64() +} + +func RandTime() time.Time { + return grand.Time() +} + +func RandBytes(n int) []byte { + return grand.Bytes(n) +} + +func RandIntn(n int) int { + return grand.Intn(n) +} + +func RandPerm(n int) []int { + 
return grand.Perm(n) +} + +//---------------------------------------- +// Rand methods + +func (r *Rand) Seed(seed int64) { + r.Lock() + r.reset(seed) + r.Unlock() +} + +// Str constructs a random alphanumeric string of given length. +func (r *Rand) Str(length int) string { + chars := []byte{} +MAIN_LOOP: + for { + val := r.Int63() + for i := 0; i < 10; i++ { + v := int(val & 0x3f) // rightmost 6 bits + if v >= 62 { // only 62 characters in strChars + val >>= 6 + continue + } else { + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 + } + } + } + + return string(chars) +} + +func (r *Rand) Uint16() uint16 { + return uint16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Uint32() uint32 { + r.Lock() + u32 := r.rand.Uint32() + r.Unlock() + return u32 +} + +func (r *Rand) Uint64() uint64 { + return uint64(r.Uint32())<<32 + uint64(r.Uint32()) +} + +func (r *Rand) Uint() uint { + r.Lock() + i := r.rand.Int() + r.Unlock() + return uint(i) +} + +func (r *Rand) Int16() int16 { + return int16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Int32() int32 { + return int32(r.Uint32()) +} + +func (r *Rand) Int64() int64 { + return int64(r.Uint64()) +} + +func (r *Rand) Int() int { + r.Lock() + i := r.rand.Int() + r.Unlock() + return i +} + +func (r *Rand) Int31() int32 { + r.Lock() + i31 := r.rand.Int31() + r.Unlock() + return i31 +} + +func (r *Rand) Int31n(n int32) int32 { + r.Lock() + i31n := r.rand.Int31n(n) + r.Unlock() + return i31n +} + +func (r *Rand) Int63() int64 { + r.Lock() + i63 := r.rand.Int63() + r.Unlock() + return i63 +} + +func (r *Rand) Int63n(n int64) int64 { + r.Lock() + i63n := r.rand.Int63n(n) + r.Unlock() + return i63n +} + +func (r *Rand) Float32() float32 { + r.Lock() + f32 := r.rand.Float32() + r.Unlock() + return f32 +} + +func (r *Rand) Float64() float64 { + r.Lock() + f64 := r.rand.Float64() + r.Unlock() + return f64 +} + +func (r *Rand) Time() time.Time { + return time.Unix(int64(r.Uint64()), 0) +} + 
+// Bytes returns n random bytes generated from the internal +// prng. +func (r *Rand) Bytes(n int) []byte { + // cRandBytes isn't guaranteed to be fast so instead + // use random bytes generated from the internal PRNG + bs := make([]byte, n) + for i := 0; i < len(bs); i++ { + bs[i] = byte(r.Int() & 0xFF) + } + return bs +} + +// Intn returns, as an int, a uniform pseudo-random number in the range [0, n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + r.Lock() + i := r.rand.Intn(n) + r.Unlock() + return i +} + +// Bool returns a uniformly random boolean +func (r *Rand) Bool() bool { + // See https://github.com/golang/go/issues/23804#issuecomment-365370418 + // for reasoning behind computing like this + return r.Int63()%2 == 0 +} + +// Perm returns a pseudo-random permutation of n integers in [0, n). +func (r *Rand) Perm(n int) []int { + r.Lock() + perm := r.rand.Perm(n) + r.Unlock() + return perm +} + +// NOTE: This relies on the os's random number generator. +// For real security, we should salt that with some seed. +// See github.com/tendermint/tendermint/crypto for a more secure reader. 
+func cRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + PanicCrisis(err) + } + return b +} diff --git a/chains/tendermint_34/libs/common/random_test.go b/chains/tendermint_34/libs/common/random_test.go new file mode 100755 index 0000000..c59a577 --- /dev/null +++ b/chains/tendermint_34/libs/common/random_test.go @@ -0,0 +1,118 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + mrand "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRandStr(t *testing.T) { + l := 243 + s := RandStr(l) + assert.Equal(t, l, len(s)) +} + +func TestRandBytes(t *testing.T) { + l := 243 + b := RandBytes(l) + assert.Equal(t, l, len(b)) +} + +func TestRandIntn(t *testing.T) { + n := 243 + for i := 0; i < 100; i++ { + x := RandIntn(n) + assert.True(t, x < n) + } +} + +// Test to make sure that we never call math.rand(). +// We do this by ensuring that outputs are deterministic. +func TestDeterminism(t *testing.T) { + var firstOutput string + + // Set math/rand's seed for the sake of debugging this test. + // (It isn't strictly necessary). + mrand.Seed(1) + + for i := 0; i < 100; i++ { + output := testThemAll() + if i == 0 { + firstOutput = output + } else { + if firstOutput != output { + t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) + } + } + } +} + +func testThemAll() string { + + // Such determinism. + grand.reset(1) + + // Use it. 
+ out := new(bytes.Buffer) + perm := RandPerm(10) + blob, _ := json.Marshal(perm) + fmt.Fprintf(out, "perm: %s\n", blob) + fmt.Fprintf(out, "randInt: %d\n", RandInt()) + fmt.Fprintf(out, "randUint: %d\n", RandUint()) + fmt.Fprintf(out, "randIntn: %d\n", RandIntn(97)) + fmt.Fprintf(out, "randInt31: %d\n", RandInt31()) + fmt.Fprintf(out, "randInt32: %d\n", RandInt32()) + fmt.Fprintf(out, "randInt63: %d\n", RandInt63()) + fmt.Fprintf(out, "randInt64: %d\n", RandInt64()) + fmt.Fprintf(out, "randUint32: %d\n", RandUint32()) + fmt.Fprintf(out, "randUint64: %d\n", RandUint64()) + return out.String() +} + +func TestRngConcurrencySafety(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + _ = RandUint64() + <-time.After(time.Millisecond * time.Duration(RandIntn(100))) + _ = RandPerm(3) + }() + } + wg.Wait() +} + +func BenchmarkRandBytes10B(b *testing.B) { + benchmarkRandBytes(b, 10) +} +func BenchmarkRandBytes100B(b *testing.B) { + benchmarkRandBytes(b, 100) +} +func BenchmarkRandBytes1KiB(b *testing.B) { + benchmarkRandBytes(b, 1024) +} +func BenchmarkRandBytes10KiB(b *testing.B) { + benchmarkRandBytes(b, 10*1024) +} +func BenchmarkRandBytes100KiB(b *testing.B) { + benchmarkRandBytes(b, 100*1024) +} +func BenchmarkRandBytes1MiB(b *testing.B) { + benchmarkRandBytes(b, 1024*1024) +} + +func benchmarkRandBytes(b *testing.B, n int) { + for i := 0; i < b.N; i++ { + _ = RandBytes(n) + } + b.ReportAllocs() +} diff --git a/chains/tendermint_34/libs/common/repeat_timer.go b/chains/tendermint_34/libs/common/repeat_timer.go new file mode 100755 index 0000000..5d04973 --- /dev/null +++ b/chains/tendermint_34/libs/common/repeat_timer.go @@ -0,0 +1,232 @@ +package common + +import ( + "sync" + "time" +) + +// Used by RepeatTimer the first time, +// and every time it's Reset() after Stop(). +type TickerMaker func(dur time.Duration) Ticker + +// Ticker is a basic ticker interface. 
+type Ticker interface { + + // Never changes, never closes. + Chan() <-chan time.Time + + // Stopping a stopped Ticker will panic. + Stop() +} + +//---------------------------------------- +// defaultTicker + +var _ Ticker = (*defaultTicker)(nil) + +type defaultTicker time.Ticker + +func defaultTickerMaker(dur time.Duration) Ticker { + ticker := time.NewTicker(dur) + return (*defaultTicker)(ticker) +} + +// Implements Ticker +func (t *defaultTicker) Chan() <-chan time.Time { + return t.C +} + +// Implements Ticker +func (t *defaultTicker) Stop() { + ((*time.Ticker)(t)).Stop() +} + +//---------------------------------------- +// LogicalTickerMaker + +// Construct a TickerMaker that always uses `source`. +// It's useful for simulating a deterministic clock. +func NewLogicalTickerMaker(source chan time.Time) TickerMaker { + return func(dur time.Duration) Ticker { + return newLogicalTicker(source, dur) + } +} + +type logicalTicker struct { + source <-chan time.Time + ch chan time.Time + quit chan struct{} +} + +func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker { + lt := &logicalTicker{ + source: source, + ch: make(chan time.Time), + quit: make(chan struct{}), + } + go lt.fireRoutine(interval) + return lt +} + +// We need a goroutine to read times from t.source +// and fire on t.Chan() when `interval` has passed. +func (t *logicalTicker) fireRoutine(interval time.Duration) { + source := t.source + + // Init `lasttime` + lasttime := time.Time{} + select { + case lasttime = <-source: + case <-t.quit: + return + } + // Init `lasttime` end + + for { + select { + case newtime := <-source: + elapsed := newtime.Sub(lasttime) + if interval <= elapsed { + // Block for determinism until the ticker is stopped. + select { + case t.ch <- newtime: + case <-t.quit: + return + } + // Reset timeleft. + // Don't try to "catch up" by sending more. 
+ // "Ticker adjusts the intervals or drops ticks to make up for + // slow receivers" - https://golang.org/pkg/time/#Ticker + lasttime = newtime + } + case <-t.quit: + return // done + } + } +} + +// Implements Ticker +func (t *logicalTicker) Chan() <-chan time.Time { + return t.ch // immutable +} + +// Implements Ticker +func (t *logicalTicker) Stop() { + close(t.quit) // it *should* panic when stopped twice. +} + +//--------------------------------------------------------------------- + +/* + RepeatTimer repeatedly sends a struct{}{} to `.Chan()` after each `dur` + period. (It's good for keeping connections alive.) + A RepeatTimer must be stopped, or it will keep a goroutine alive. +*/ +type RepeatTimer struct { + name string + ch chan time.Time + tm TickerMaker + + mtx sync.Mutex + dur time.Duration + ticker Ticker + quit chan struct{} +} + +// NewRepeatTimer returns a RepeatTimer with a defaultTicker. +func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { + return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker) +} + +// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker +// maker. +func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer { + var t = &RepeatTimer{ + name: name, + ch: make(chan time.Time), + tm: tm, + dur: dur, + ticker: nil, + quit: nil, + } + t.reset() + return t +} + +// receive ticks on ch, send out on t.ch +func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { + for { + select { + case tick := <-ch: + select { + case t.ch <- tick: + case <-quit: + return + } + case <-quit: // NOTE: `t.quit` races. + return + } + } +} + +func (t *RepeatTimer) Chan() <-chan time.Time { + return t.ch +} + +func (t *RepeatTimer) Stop() { + t.mtx.Lock() + defer t.mtx.Unlock() + + t.stop() +} + +// Wait the duration again before firing. 
+func (t *RepeatTimer) Reset() { + t.mtx.Lock() + defer t.mtx.Unlock() + + t.reset() +} + +//---------------------------------------- +// Misc. + +// CONTRACT: (non-constructor) caller should hold t.mtx. +func (t *RepeatTimer) reset() { + if t.ticker != nil { + t.stop() + } + t.ticker = t.tm(t.dur) + t.quit = make(chan struct{}) + go t.fireRoutine(t.ticker.Chan(), t.quit) +} + +// CONTRACT: caller should hold t.mtx. +func (t *RepeatTimer) stop() { + if t.ticker == nil { + /* + Similar to the case of closing channels twice: + https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk + Stopping a RepeatTimer twice implies that you do + not know whether you are done or not. + If you're calling stop on a stopped RepeatTimer, + you probably have race conditions. + */ + panic("Tried to stop a stopped RepeatTimer") + } + t.ticker.Stop() + t.ticker = nil + /* + From https://golang.org/pkg/time/#Ticker: + "Stop the ticker to release associated resources" + "After Stop, no more ticks will be sent" + So we shouldn't have to do the below. + + select { + case <-t.ch: + // read off channel if there's anything there + default: + } + */ + close(t.quit) +} diff --git a/chains/tendermint_34/libs/common/repeat_timer_test.go b/chains/tendermint_34/libs/common/repeat_timer_test.go new file mode 100755 index 0000000..fee6b31 --- /dev/null +++ b/chains/tendermint_34/libs/common/repeat_timer_test.go @@ -0,0 +1,135 @@ +package common + +import ( + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" +) + +func TestDefaultTicker(t *testing.T) { + ticker := defaultTickerMaker(time.Millisecond * 10) + <-ticker.Chan() + ticker.Stop() +} + +func TestRepeatTimer(t *testing.T) { + + ch := make(chan time.Time, 100) + mtx := new(sync.Mutex) + + // tick() fires from start to end + // (exclusive) in milliseconds with incr. + // It locks on mtx, so subsequent calls + // run in series. 
+ tick := func(startMs, endMs, incrMs time.Duration) { + mtx.Lock() + go func() { + for tMs := startMs; tMs < endMs; tMs += incrMs { + lt := time.Time{} + lt = lt.Add(tMs * time.Millisecond) + ch <- lt + } + mtx.Unlock() + }() + } + + // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". + tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { + + // Check against timesMs. + for _, timeMs := range timesMs { + tyme := <-rt.Chan() + sinceMs := tyme.Sub(time.Time{}) / time.Millisecond + assert.Equal(t, timeMs, int64(sinceMs)) + } + + // goroutines to ensure that + // no other times will fire. + // See https://github.com/tendermint/tendermint/libs/issues/120. + time.Sleep(time.Millisecond * 100) + done := true + select { + case <-rt.Chan(): + done = false + default: + } + assert.True(t, done) + } + + tm := NewLogicalTickerMaker(ch) + rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) + + /* NOTE: Useful for debugging deadlocks... + go func() { + time.Sleep(time.Second * 3) + trace := make([]byte, 102400) + count := runtime.Stack(trace, true) + fmt.Printf("Stack of %d bytes: %s\n", count, trace) + }() + */ + + tick(0, 1000, 10) + tock(t, rt, []int64{}) + tick(1000, 2000, 10) + tock(t, rt, []int64{1000}) + tick(2005, 5000, 10) + tock(t, rt, []int64{2005, 3005, 4005}) + tick(5001, 5999, 1) + // Read 5005 instead of 5001 because + // it's 1 second greater than 4005. + tock(t, rt, []int64{5005}) + tick(6000, 7005, 1) + tock(t, rt, []int64{6005}) + tick(7033, 8032, 1) + tock(t, rt, []int64{7033}) + + // After a reset, nothing happens + // until two ticks are received. + rt.Reset() + tock(t, rt, []int64{}) + tick(8040, 8041, 1) + tock(t, rt, []int64{}) + tick(9555, 9556, 1) + tock(t, rt, []int64{9555}) + + // After a stop, nothing more is sent. + rt.Stop() + tock(t, rt, []int64{}) + + // Another stop panics. 
+ assert.Panics(t, func() { rt.Stop() }) +} + +func TestRepeatTimerReset(t *testing.T) { + // check that we are not leaking any go-routines + defer leaktest.Check(t)() + + timer := NewRepeatTimer("test", 20*time.Millisecond) + defer timer.Stop() + + // test we don't receive tick before duration ms. + select { + case <-timer.Chan(): + t.Fatal("did not expect to receive tick") + default: + } + + timer.Reset() + + // test we receive tick after Reset is called + select { + case <-timer.Chan(): + // all good + case <-time.After(40 * time.Millisecond): + t.Fatal("expected to receive tick after reset") + } + + // just random calls + for i := 0; i < 100; i++ { + time.Sleep(time.Duration(RandIntn(40)) * time.Millisecond) + timer.Reset() + } +} diff --git a/chains/tendermint_34/libs/common/service.go b/chains/tendermint_34/libs/common/service.go new file mode 100755 index 0000000..96a5e63 --- /dev/null +++ b/chains/tendermint_34/libs/common/service.go @@ -0,0 +1,220 @@ +package common + +import ( + "errors" + "fmt" + "sync/atomic" + + "github.com/tendermint/tendermint/libs/log" +) + +var ( + // ErrAlreadyStarted is returned when somebody tries to start an already + // running service. + ErrAlreadyStarted = errors.New("already started") + // ErrAlreadyStopped is returned when somebody tries to stop an already + // stopped service (without resetting it). + ErrAlreadyStopped = errors.New("already stopped") + // ErrNotStarted is returned when somebody tries to stop a not running + // service. + ErrNotStarted = errors.New("not started") +) + +// Service defines a service that can be started, stopped, and reset. +type Service interface { + // Start the service. + // If it's already started or stopped, will return an error. + // If OnStart() returns an error, it's returned by Start() + Start() error + OnStart() error + + // Stop the service. + // If it's already stopped, will return an error. + // OnStop must never error. + Stop() error + OnStop() + + // Reset the service. 
+ // Panics by default - must be overwritten to enable reset. + Reset() error + OnReset() error + + // Return true if the service is running + IsRunning() bool + + // Quit returns a channel, which is closed once service is stopped. + Quit() <-chan struct{} + + // String representation of the service + String() string + + // SetLogger sets a logger. + SetLogger(log.Logger) +} + +/* +Classical-inheritance-style service declarations. Services can be started, then +stopped, then optionally restarted. + +Users can override the OnStart/OnStop methods. In the absence of errors, these +methods are guaranteed to be called at most once. If OnStart returns an error, +service won't be marked as started, so the user can call Start again. + +A call to Reset will panic, unless OnReset is overwritten, allowing +OnStart/OnStop to be called again. + +The caller must ensure that Start and Stop are not called concurrently. + +It is ok to call Stop without calling Start first. + +Typical usage: + + type FooService struct { + BaseService + // private fields + } + + func NewFooService() *FooService { + fs := &FooService{ + // init + } + fs.BaseService = *NewBaseService(log, "FooService", fs) + return fs + } + + func (fs *FooService) OnStart() error { + fs.BaseService.OnStart() // Always call the overridden method. + // initialize private fields + // start subroutines, etc. + } + + func (fs *FooService) OnStop() error { + fs.BaseService.OnStop() // Always call the overridden method. + // close/destroy private fields + // stop subroutines, etc. + } +*/ +type BaseService struct { + Logger log.Logger + name string + started uint32 // atomic + stopped uint32 // atomic + quit chan struct{} + + // The "subclass" of BaseService + impl Service +} + +// NewBaseService creates a new BaseService. 
+func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { + if logger == nil { + logger = log.NewNopLogger() + } + + return &BaseService{ + Logger: logger, + name: name, + quit: make(chan struct{}), + impl: impl, + } +} + +// SetLogger implements Service by setting a logger. +func (bs *BaseService) SetLogger(l log.Logger) { + bs.Logger = l +} + +// Start implements Service by calling OnStart (if defined). An error will be +// returned if the service is already running or stopped. Not to start the +// stopped service, you need to call Reset. +func (bs *BaseService) Start() error { + if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { + if atomic.LoadUint32(&bs.stopped) == 1 { + bs.Logger.Error(fmt.Sprintf("Not starting %v -- already stopped", bs.name), "impl", bs.impl) + // revert flag + atomic.StoreUint32(&bs.started, 0) + return ErrAlreadyStopped + } + bs.Logger.Info(fmt.Sprintf("Starting %v", bs.name), "impl", bs.impl) + err := bs.impl.OnStart() + if err != nil { + // revert flag + atomic.StoreUint32(&bs.started, 0) + return err + } + return nil + } + bs.Logger.Debug(fmt.Sprintf("Not starting %v -- already started", bs.name), "impl", bs.impl) + return ErrAlreadyStarted +} + +// OnStart implements Service by doing nothing. +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStart() +func (bs *BaseService) OnStart() error { return nil } + +// Stop implements Service by calling OnStop (if defined) and closing quit +// channel. An error will be returned if the service is already stopped. 
+func (bs *BaseService) Stop() error { + if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { + if atomic.LoadUint32(&bs.started) == 0 { + bs.Logger.Error(fmt.Sprintf("Not stopping %v -- have not been started yet", bs.name), "impl", bs.impl) + // revert flag + atomic.StoreUint32(&bs.stopped, 0) + return ErrNotStarted + } + bs.Logger.Info(fmt.Sprintf("Stopping %v", bs.name), "impl", bs.impl) + bs.impl.OnStop() + close(bs.quit) + return nil + } + bs.Logger.Debug(fmt.Sprintf("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) + return ErrAlreadyStopped +} + +// OnStop implements Service by doing nothing. +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStop() +func (bs *BaseService) OnStop() {} + +// Reset implements Service by calling OnReset callback (if defined). An error +// will be returned if the service is running. +func (bs *BaseService) Reset() error { + if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { + bs.Logger.Debug(fmt.Sprintf("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) + return fmt.Errorf("can't reset running %s", bs.name) + } + + // whether or not we've started, we can reset + atomic.CompareAndSwapUint32(&bs.started, 1, 0) + + bs.quit = make(chan struct{}) + return bs.impl.OnReset() +} + +// OnReset implements Service by panicking. +func (bs *BaseService) OnReset() error { + PanicSanity("The service cannot be reset") + return nil +} + +// IsRunning implements Service by returning true or false depending on the +// service's state. +func (bs *BaseService) IsRunning() bool { + return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 +} + +// Wait blocks until the service is stopped. +func (bs *BaseService) Wait() { + <-bs.quit +} + +// String implements Service by returning a string representation of the service. +func (bs *BaseService) String() string { + return bs.name +} + +// Quit implements Service by returning a quit channel. 
+func (bs *BaseService) Quit() <-chan struct{} { + return bs.quit +} diff --git a/chains/tendermint_34/libs/common/service_test.go b/chains/tendermint_34/libs/common/service_test.go new file mode 100755 index 0000000..ef360a6 --- /dev/null +++ b/chains/tendermint_34/libs/common/service_test.go @@ -0,0 +1,54 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type testService struct { + BaseService +} + +func (testService) OnReset() error { + return nil +} + +func TestBaseServiceWait(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + waitFinished := make(chan struct{}) + go func() { + ts.Wait() + waitFinished <- struct{}{} + }() + + go ts.Stop() + + select { + case <-waitFinished: + // all good + case <-time.After(100 * time.Millisecond): + t.Fatal("expected Wait() to finish within 100 ms.") + } +} + +func TestBaseServiceReset(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(nil, "TestService", ts) + ts.Start() + + err := ts.Reset() + require.Error(t, err, "expected cant reset service error") + + ts.Stop() + + err = ts.Reset() + require.NoError(t, err) + + err = ts.Start() + require.NoError(t, err) +} diff --git a/chains/tendermint_34/libs/common/string.go b/chains/tendermint_34/libs/common/string.go new file mode 100755 index 0000000..ddf350b --- /dev/null +++ b/chains/tendermint_34/libs/common/string.go @@ -0,0 +1,76 @@ +package common + +import ( + "fmt" + "strings" +) + +// StringInSlice returns true if a is found the list. +func StringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// SplitAndTrim slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. 
First part is equivalent to strings.SplitN with a count of
+// -1.
+func SplitAndTrim(s, sep, cutset string) []string {
+	if s == "" {
+		return []string{}
+	}
+
+	spl := strings.Split(s, sep)
+	for i := 0; i < len(spl); i++ {
+		spl[i] = strings.Trim(spl[i], cutset)
+	}
+	return spl
+}
+
+// Returns true if s is non-empty and every byte is a printable, non-tab
+// ASCII character (0x20..0x7E).
+func IsASCIIText(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	for _, b := range []byte(s) {
+		if 32 <= b && b <= 126 {
+			// good
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// ASCIITrim removes every space byte from s (not just leading/trailing ones).
+// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics.
+func ASCIITrim(s string) string {
+	r := make([]byte, 0, len(s))
+	for _, b := range []byte(s) {
+		if b == 32 {
+			continue // skip space
+		} else if 32 < b && b <= 126 {
+			r = append(r, b)
+		} else {
+			panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b))
+		}
+	}
+	return string(r)
+}
+
+// StringSliceEqual checks if string slices a and b are equal
+func StringSliceEqual(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := 0; i < len(a); i++ {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/chains/tendermint_34/libs/common/string_test.go b/chains/tendermint_34/libs/common/string_test.go
new file mode 100755
index 0000000..35b6faf
--- /dev/null
+++ b/chains/tendermint_34/libs/common/string_test.go
@@ -0,0 +1,58 @@
+package common
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStringInSlice(t *testing.T) {
+	assert.True(t, StringInSlice("a", []string{"a", "b", "c"}))
+	assert.False(t, StringInSlice("d", []string{"a", "b", "c"}))
+	assert.True(t, StringInSlice("", []string{""}))
+	assert.False(t, StringInSlice("", []string{}))
+}
+
+func TestIsASCIIText(t *testing.T) {
+	notASCIIText := []string{
+		"", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t",
+	}
+	for _, v := range notASCIIText {
+		assert.False(t, IsASCIIText(v),
"%q is not ascii-text", v)
+	}
+	asciiText := []string{
+		" ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123",
+	}
+	for _, v := range asciiText {
+		assert.True(t, IsASCIIText(v), "%q is ascii-text", v)
+	}
+}
+
+func TestASCIITrim(t *testing.T) {
+	assert.Equal(t, ASCIITrim(" "), "")
+	assert.Equal(t, ASCIITrim(" a"), "a")
+	assert.Equal(t, ASCIITrim("a "), "a")
+	assert.Equal(t, ASCIITrim(" a "), "a")
+	assert.Panics(t, func() { ASCIITrim("\xC2\xA2") })
+}
+
+func TestStringSliceEqual(t *testing.T) {
+	tests := []struct {
+		a    []string
+		b    []string
+		want bool
+	}{
+		{[]string{"hello", "world"}, []string{"hello", "world"}, true},
+		{[]string{"test"}, []string{"test"}, true},
+		{[]string{"test1"}, []string{"test2"}, false},
+		{[]string{"hello", "world."}, []string{"hello", "world!"}, false},
+		{[]string{"only 1 word"}, []string{"two", "words!"}, false},
+		{[]string{"two", "words!"}, []string{"only 1 word"}, false},
+	}
+	for i, tt := range tests {
+		require.Equal(t, tt.want, StringSliceEqual(tt.a, tt.b),
+			"StringSliceEqual failed on test %d", i)
+	}
+}
diff --git a/chains/tendermint_34/libs/common/tempfile.go b/chains/tendermint_34/libs/common/tempfile.go
new file mode 100755
index 0000000..a5bb7a5
--- /dev/null
+++ b/chains/tendermint_34/libs/common/tempfile.go
@@ -0,0 +1,128 @@
+package common
+
+import (
+	fmt "fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	atomicWriteFilePrefix = "write-file-atomic-"
+	// Maximum number of atomic write file conflicts before we start reseeding
+	// (reduced from golang's default 10 due to using an increased randomness space)
+	atomicWriteFileMaxNumConflicts = 5
+	// Maximum number of attempts to make at writing the write file before giving up
+	// (reduced from golang's default 10000 due to using an increased randomness space)
+	atomicWriteFileMaxNumWriteAttempts = 1000
+	// LCG constants from Donald Knuth MMIX
+	// This LCG's has a period equal to 2**64
+	lcgA =
6364136223846793005
+	lcgC = 1442695040888963407
+	// Create in case it doesn't exist and force kernel
+	// flush, which still leaves the potential of lingering disk cache.
+	// Never overwrites files
+	atomicWriteFileFlag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC | os.O_EXCL
+)
+
+var (
+	// atomicWriteFileRand is the shared LCG state; 0 means "not yet seeded".
+	atomicWriteFileRand   uint64
+	atomicWriteFileRandMu sync.Mutex
+)
+
+// writeFileRandReseed derives a fresh LCG seed from the current time and PID.
+func writeFileRandReseed() uint64 {
+	// Scale the PID, to minimize the chance that two processes seeded at similar times
+	// don't get the same seed. Note that PID typically ranges in [0, 2**15), but can be
+	// up to 2**22 under certain configurations. We left bit-shift the PID by 20, so that
+	// a PID difference of one corresponds to a time difference of 2048 seconds.
+	// The important thing here is that now for a seed conflict, they would both have to be on
+	// the correct nanosecond offset, and second-based offset, which is much less likely than
+	// just a conflict with the correct nanosecond offset.
+	return uint64(time.Now().UnixNano() + int64(os.Getpid()<<20))
+}
+
+// Use a fast thread safe LCG for atomic write file names.
+// Returns a string corresponding to a 64 bit int.
+// If it was a negative int, the leading number is a 0.
+func randWriteFileSuffix() string {
+	atomicWriteFileRandMu.Lock()
+	r := atomicWriteFileRand
+	if r == 0 {
+		// first use in this process: seed lazily
+		r = writeFileRandReseed()
+	}
+
+	// Update randomness according to lcg
+	r = r*lcgA + lcgC
+
+	atomicWriteFileRand = r
+	atomicWriteFileRandMu.Unlock()
+	// Can have a negative name, replace this in the following
+	suffix := strconv.Itoa(int(r))
+	if string(suffix[0]) == "-" {
+		// Replace first "-" with "0". This is purely for UI clarity,
+		// as otherwise there would be two `-` in a row.
+		suffix = strings.Replace(suffix, "-", "0", 1)
+	}
+	return suffix
+}
+
+// WriteFileAtomic creates a temporary file with data and provided perm and
+// swaps it atomically with filename if successful.
+func WriteFileAtomic(filename string, data []byte, perm os.FileMode) (err error) {
+	// This implementation is inspired by the golang stdlibs method of creating
+	// tempfiles. Notable differences are that we use different flags, a 64 bit LCG
+	// and handle negatives differently.
+	// The core reason we can't use golang's TempFile is that we must write
+	// to the file synchronously, as we need this to persist to disk.
+	// We also open it in write-only mode, to avoid concerns that arise with read.
+	var (
+		dir = filepath.Dir(filename)
+		f   *os.File
+	)
+
+	nconflict := 0
+	// Limit the number of attempts to create a file. Something is seriously
+	// wrong if it didn't get created after 1000 attempts, and we don't want
+	// an infinite loop
+	i := 0
+	for ; i < atomicWriteFileMaxNumWriteAttempts; i++ {
+		name := filepath.Join(dir, atomicWriteFilePrefix+randWriteFileSuffix())
+		// O_EXCL in atomicWriteFileFlag makes this fail if the name is taken
+		f, err = os.OpenFile(name, atomicWriteFileFlag, perm)
+		// If the file already exists, try a new file
+		if os.IsExist(err) {
+			// If the file exists too many times, start reseeding as we've
+			// likely hit another instance's seed.
+			if nconflict++; nconflict > atomicWriteFileMaxNumConflicts {
+				atomicWriteFileRandMu.Lock()
+				atomicWriteFileRand = writeFileRandReseed()
+				atomicWriteFileRandMu.Unlock()
+			}
+			continue
+		} else if err != nil {
+			return err
+		}
+		break
+	}
+	if i == atomicWriteFileMaxNumWriteAttempts {
+		return fmt.Errorf("Could not create atomic write file after %d attempts", i)
+	}
+
+	// Clean up in any case. Defer stacking order is last-in-first-out.
+	defer os.Remove(f.Name())
+	defer f.Close()
+
+	if n, err := f.Write(data); err != nil {
+		return err
+	} else if n < len(data) {
+		return io.ErrShortWrite
+	}
+	// Close the file before renaming it, otherwise it will cause "The process
+	// cannot access the file because it is being used by another process." on windows.
+	f.Close()
+
+	return os.Rename(f.Name(), filename)
+}
diff --git a/chains/tendermint_34/libs/common/tempfile_test.go b/chains/tendermint_34/libs/common/tempfile_test.go
new file mode 100755
index 0000000..51da909
--- /dev/null
+++ b/chains/tendermint_34/libs/common/tempfile_test.go
@@ -0,0 +1,138 @@
+package common
+
+// Need access to internal variables, so can't use _test package
+
+import (
+	"bytes"
+	fmt "fmt"
+	"io/ioutil"
+	"os"
+	testing "testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestWriteFileAtomic(t *testing.T) {
+	var (
+		data             = []byte(RandStr(RandIntn(2048)))
+		old              = RandBytes(RandIntn(2048))
+		perm os.FileMode = 0600
+	)
+
+	f, err := ioutil.TempFile("/tmp", "write-atomic-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(f.Name())
+
+	// pre-populate the target so the test proves the old content is replaced
+	if err = ioutil.WriteFile(f.Name(), old, 0664); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = WriteFileAtomic(f.Name(), data, perm); err != nil {
+		t.Fatal(err)
+	}
+
+	rData, err := ioutil.ReadFile(f.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(data, rData) {
+		t.Fatalf("data mismatch: %v != %v", data, rData)
+	}
+
+	stat, err := os.Stat(f.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if have, want := stat.Mode().Perm(), perm; have != want {
+		t.Errorf("have %v, want %v", have, want)
+	}
+}
+
+// This tests atomic write file when there is a single duplicate file.
+// Expected behavior is for a new file to be created, and the original write file to be unaltered.
+func TestWriteFileAtomicDuplicateFile(t *testing.T) {
+	var (
+		defaultSeed    uint64 = 1
+		testString            = "This is a glorious test string"
+		expectedString        = "Did the test file's string appear here?"
+
+		fileToWrite = "/tmp/TestWriteFileAtomicDuplicateFile-test.txt"
+	)
+	// Create a file at the seed, and reset the seed.
+	atomicWriteFileRand = defaultSeed
+	firstFileRand := randWriteFileSuffix()
+	atomicWriteFileRand = defaultSeed
+	fname := "/tmp/" + atomicWriteFilePrefix + firstFileRand
+	f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777)
+	defer os.Remove(fname)
+	// Defer here, in case there is a panic in WriteFileAtomic.
+	defer os.Remove(fileToWrite)
+
+	require.Nil(t, err)
+	// NOTE(review): WriteString/WriteFileAtomic errors are deliberately unchecked;
+	// the assertions below catch any resulting mismatch.
+	f.WriteString(testString)
+	WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
+	// Check that the first atomic file was untouched
+	firstAtomicFileBytes, err := ioutil.ReadFile(fname)
+	require.Nil(t, err, "Error reading first atomic file")
+	require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten")
+	// Check that the resultant file is correct
+	resultantFileBytes, err := ioutil.ReadFile(fileToWrite)
+	require.Nil(t, err, "Error reading resultant file")
+	require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes")
+
+	// Check that the intermediate write file was deleted
+	// Get the second write files' randomness
+	atomicWriteFileRand = defaultSeed
+	_ = randWriteFileSuffix()
+	secondFileRand := randWriteFileSuffix()
+	_, err = os.Stat("/tmp/" + atomicWriteFilePrefix + secondFileRand)
+	require.True(t, os.IsNotExist(err), "Intermittent atomic write file not deleted")
+}
+
+// This tests atomic write file when there are many duplicate files.
+// Expected behavior is for a new file to be created under a completely new seed,
+// and the original write files to be unaltered.
+func TestWriteFileAtomicManyDuplicates(t *testing.T) {
+	var (
+		defaultSeed    uint64 = 2
+		testString            = "This is a glorious test string, from file %d"
+		expectedString        = "Did any of the test file's string appear here?"
+
+		fileToWrite = "/tmp/TestWriteFileAtomicDuplicateFile-test.txt"
+	)
+	// Initialize all of the atomic write files
+	atomicWriteFileRand = defaultSeed
+	for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ {
+		fileRand := randWriteFileSuffix()
+		fname := "/tmp/" + atomicWriteFilePrefix + fileRand
+		f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777)
+		require.Nil(t, err)
+		f.WriteString(fmt.Sprintf(testString, i))
+		defer os.Remove(fname)
+	}
+
+	atomicWriteFileRand = defaultSeed
+	// Defer here, in case there is a panic in WriteFileAtomic.
+	defer os.Remove(fileToWrite)
+
+	WriteFileAtomic(fileToWrite, []byte(expectedString), 0777)
+	// Check that all intermittent atomic file were untouched
+	atomicWriteFileRand = defaultSeed
+	for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ {
+		fileRand := randWriteFileSuffix()
+		fname := "/tmp/" + atomicWriteFilePrefix + fileRand
+		firstAtomicFileBytes, err := ioutil.ReadFile(fname)
+		require.Nil(t, err, "Error reading first atomic file")
+		require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes,
+			"atomic write file %d was overwritten", i)
+	}
+
+	// Check that the resultant file is correct
+	resultantFileBytes, err := ioutil.ReadFile(fileToWrite)
+	require.Nil(t, err, "Error reading resultant file")
+	require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes")
+}
diff --git a/chains/tendermint_34/libs/common/throttle_timer.go b/chains/tendermint_34/libs/common/throttle_timer.go
new file mode 100755
index 0000000..38ef4e9
--- /dev/null
+++ b/chains/tendermint_34/libs/common/throttle_timer.go
@@ -0,0 +1,75 @@
+package common
+
+import (
+	"sync"
+	"time"
+)
+
+/*
+ThrottleTimer fires an event at most "dur" after each .Set() call.
+If a short burst of .Set() calls happens, ThrottleTimer fires once.
+If a long continuous burst of .Set() calls happens, ThrottleTimer fires
+at most once every "dur".
+*/
+type ThrottleTimer struct {
+	Name string
+	Ch   chan struct{} // fires (receives an event) when the throttle period elapses
+	quit chan struct{}
+	dur  time.Duration
+
+	mtx   sync.Mutex
+	timer *time.Timer
+	isSet bool
+}
+
+// NewThrottleTimer returns a ThrottleTimer whose underlying timer is created
+// armed and then immediately stopped, so nothing fires until the first Set().
+func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer {
+	var ch = make(chan struct{})
+	var quit = make(chan struct{})
+	var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit}
+	t.mtx.Lock()
+	t.timer = time.AfterFunc(dur, t.fireRoutine)
+	t.mtx.Unlock()
+	t.timer.Stop()
+	return t
+}
+
+// fireRoutine runs when the timer expires; it tries a non-blocking send on Ch.
+func (t *ThrottleTimer) fireRoutine() {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	select {
+	case t.Ch <- struct{}{}:
+		t.isSet = false
+	case <-t.quit:
+		// do nothing
+	default:
+		// no receiver ready: retry after another dur
+		t.timer.Reset(t.dur)
+	}
+}
+
+// Set arms the timer unless an un-fired Set is already pending.
+func (t *ThrottleTimer) Set() {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	if !t.isSet {
+		t.isSet = true
+		t.timer.Reset(t.dur)
+	}
+}
+
+// Unset cancels any pending fire.
+func (t *ThrottleTimer) Unset() {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	t.isSet = false
+	t.timer.Stop()
+}
+
+// For ease of .Stop()'ing services before .Start()'ing them,
+// we ignore .Stop()'s on nil ThrottleTimers
+func (t *ThrottleTimer) Stop() bool {
+	if t == nil {
+		return false
+	}
+	close(t.quit)
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	return t.timer.Stop()
+}
diff --git a/chains/tendermint_34/libs/common/throttle_timer_test.go b/chains/tendermint_34/libs/common/throttle_timer_test.go
new file mode 100755
index 0000000..00f5abd
--- /dev/null
+++ b/chains/tendermint_34/libs/common/throttle_timer_test.go
@@ -0,0 +1,78 @@
+package common
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	// make govet noshadow happy...
+	asrt "github.com/stretchr/testify/assert"
+)
+
+// thCounter counts, under a mutex, the events received on its input channel.
+type thCounter struct {
+	input chan struct{}
+	mtx   sync.Mutex
+	count int
+}
+
+func (c *thCounter) Increment() {
+	c.mtx.Lock()
+	c.count++
+	c.mtx.Unlock()
+}
+
+func (c *thCounter) Count() int {
+	c.mtx.Lock()
+	val := c.count
+	c.mtx.Unlock()
+	return val
+}
+
+// Read should run in a go-routine and
+// updates count by one every time a packet comes in
+func (c *thCounter) Read() {
+	for range c.input {
+		c.Increment()
+	}
+}
+
+func TestThrottle(test *testing.T) {
+	assert := asrt.New(test)
+
+	ms := 50
+	delay := time.Duration(ms) * time.Millisecond
+	longwait := time.Duration(2) * delay
+	t := NewThrottleTimer("foo", delay)
+
+	// start at 0
+	c := &thCounter{input: t.Ch}
+	assert.Equal(0, c.Count())
+	go c.Read()
+
+	// waiting does nothing
+	time.Sleep(longwait)
+	assert.Equal(0, c.Count())
+
+	// send one event adds one
+	t.Set()
+	time.Sleep(longwait)
+	assert.Equal(1, c.Count())
+
+	// send a burst adds one
+	for i := 0; i < 5; i++ {
+		t.Set()
+	}
+	time.Sleep(longwait)
+	assert.Equal(2, c.Count())
+
+	// send 12, over 2 delay sections, adds 3
+	short := time.Duration(ms/5) * time.Millisecond
+	for i := 0; i < 13; i++ {
+		t.Set()
+		time.Sleep(short)
+	}
+	time.Sleep(longwait)
+	assert.Equal(5, c.Count())
+
+	close(t.Ch)
+}
diff --git a/chains/tendermint_34/libs/common/types.pb.go b/chains/tendermint_34/libs/common/types.pb.go
new file mode 100755
index 0000000..716d28a
--- /dev/null
+++ b/chains/tendermint_34/libs/common/types.pb.go
@@ -0,0 +1,771 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: libs/common/types.proto
+
+//nolint
+package common
+
+import proto "github.com/gogo/protobuf/proto"
+import golang_proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import bytes "bytes"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = golang_proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Define these here for compatibility but use libs/common.KVPair. +type KVPair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { + return fileDescriptor_types_611b4364a8604338, []int{0} +} +func (m *KVPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KVPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KVPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KVPair.Merge(dst, src) +} +func (m *KVPair) XXX_Size() int { + return m.Size() +} +func (m *KVPair) XXX_DiscardUnknown() { + xxx_messageInfo_KVPair.DiscardUnknown(m) +} + +var xxx_messageInfo_KVPair proto.InternalMessageInfo + +func (m *KVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Define these here for compatibility but use libs/common.KI64Pair. 
+type KI64Pair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { + return fileDescriptor_types_611b4364a8604338, []int{1} +} +func (m *KI64Pair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KI64Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KI64Pair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KI64Pair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KI64Pair.Merge(dst, src) +} +func (m *KI64Pair) XXX_Size() int { + return m.Size() +} +func (m *KI64Pair) XXX_DiscardUnknown() { + xxx_messageInfo_KI64Pair.DiscardUnknown(m) +} + +var xxx_messageInfo_KI64Pair proto.InternalMessageInfo + +func (m *KI64Pair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KI64Pair) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*KVPair)(nil), "common.KVPair") + golang_proto.RegisterType((*KVPair)(nil), "common.KVPair") + proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") + golang_proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") +} +func (this *KVPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*KVPair) + if !ok { + that2, ok := that.(KVPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
!bytes.Equal(this.Key, that1.Key) { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *KI64Pair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*KI64Pair) + if !ok { + that2, ok := that.(KI64Pair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (m *KVPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KVPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *KI64Pair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KI64Pair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Value != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + i += 
copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedKVPair(r randyTypes, easy bool) *KVPair { + this := &KVPair{} + v1 := r.Intn(100) + this.Key = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Key[i] = byte(r.Intn(256)) + } + v2 := r.Intn(100) + this.Value = make([]byte, v2) + for i := 0; i < v2; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedKI64Pair(r randyTypes, easy bool) *KI64Pair { + this := &KI64Pair{} + v3 := r.Intn(100) + this.Key = make([]byte, v3) + for i := 0; i < v3; i++ { + this.Key[i] = byte(r.Intn(256)) + } + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +type randyTypes interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneTypes(r randyTypes) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringTypes(r randyTypes) string { + v4 := r.Intn(100) + tmps := make([]rune, v4) + for i := 0; i < v4; i++ { + tmps[i] = randUTF8RuneTypes(r) + } + return string(tmps) +} +func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + 
switch wire { + case 0: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + v5 := r.Int63() + if r.Intn(2) == 0 { + v5 *= -1 + } + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v5)) + case 1: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *KVPair) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *KI64Pair) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovTypes(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KVPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KVPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KVPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KI64Pair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KI64Pair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KI64Pair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("libs/common/types.proto", fileDescriptor_types_611b4364a8604338) } +func init() { + golang_proto.RegisterFile("libs/common/types.proto", fileDescriptor_types_611b4364a8604338) +} + +var fileDescriptor_types_611b4364a8604338 = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0xc9, 0x4c, 0x2a, + 0xd6, 0x4f, 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, + 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, + 0x30, 0x07, 0xcc, 0x82, 0x68, 0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, + 0x12, 0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, + 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, + 0x17, 0x87, 0xb7, 0xa7, 0x99, 0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x9c, 0x64, 0x7e, 0x3c, 0x94, + 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0x71, 0xc7, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc0, 0x63, 
0x39, 0xc6, 0x24, 0x36, 0xb0, 0x53, 0x8c, + 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x39, 0xe1, 0xef, 0xdc, 0x00, 0x00, 0x00, +} diff --git a/chains/tendermint_34/libs/common/types.proto b/chains/tendermint_34/libs/common/types.proto new file mode 100755 index 0000000..518e7ca --- /dev/null +++ b/chains/tendermint_34/libs/common/types.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package common; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.goproto_registration) = true; +// Generate tests +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.testgen_all) = true; + +//---------------------------------------- +// Abstract types + +// Define these here for compatibility but use tmlibs/common.KVPair. +message KVPair { + bytes key = 1; + bytes value = 2; +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. +message KI64Pair { + bytes key = 1; + int64 value = 2; +} diff --git a/chains/tendermint_34/libs/common/typespb_test.go b/chains/tendermint_34/libs/common/typespb_test.go new file mode 100755 index 0000000..439cc12 --- /dev/null +++ b/chains/tendermint_34/libs/common/typespb_test.go @@ -0,0 +1,271 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: libs/common/types.proto + +package common + +import testing "testing" +import math_rand "math/rand" +import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = golang_proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func TestKVPairProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KVPair{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestKVPairMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KVPair{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKI64PairProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KI64Pair{} 
+ if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestKI64PairMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KI64Pair{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKVPairJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KVPair{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestKI64PairJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + 
p := NewPopulatedKI64Pair(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &KI64Pair{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestKVPairProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &KVPair{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKVPairProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &KVPair{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKI64PairProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &KI64Pair{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKI64PairProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := 
NewPopulatedKI64Pair(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &KI64Pair{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestKVPairSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKVPair(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestKI64PairSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedKI64Pair(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/chains/tendermint_34/libs/errors/errors.go b/chains/tendermint_34/libs/errors/errors.go new file mode 100755 index 
0000000..a033827 --- /dev/null +++ b/chains/tendermint_34/libs/errors/errors.go @@ -0,0 +1,21 @@ +// Package errors contains errors that are thrown across packages. +package errors + +// // ErrPermissionsChanged occurs if the file permission have changed since the file was created. +// type ErrPermissionsChanged struct { +// name string +// got, want os.FileMode +// } + +// func NewErrPermissionsChanged(name string, got, want os.FileMode) *ErrPermissionsChanged { +// return &ErrPermissionsChanged{name: name, got: got, want: want} +// } + +// func (e ErrPermissionsChanged) Error() string { +// return fmt.Sprintf( +// "file: [%v]\nexpected file permissions: %v, got: %v", +// e.name, +// e.want, +// e.got, +// ) +// } diff --git a/chains/tendermint_34/libs/events/Makefile b/chains/tendermint_34/libs/events/Makefile new file mode 100755 index 0000000..696aaff --- /dev/null +++ b/chains/tendermint_34/libs/events/Makefile @@ -0,0 +1,9 @@ +.PHONY: docs +REPO:=github.com/tendermint/tendermint/libs/events + +docs: + @go get github.com/davecheney/godoc2md + godoc2md $(REPO) > README.md + +test: + go test -v ./... 
diff --git a/chains/tendermint_34/libs/events/README.md b/chains/tendermint_34/libs/events/README.md new file mode 100755 index 0000000..14aa498 --- /dev/null +++ b/chains/tendermint_34/libs/events/README.md @@ -0,0 +1,175 @@ + + +# events +`import "github.com/tendermint/tendermint/libs/events"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) + +## Overview +Pub-Sub in go with event caching + + + + +## Index +* [type EventCache](#EventCache) + * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) + * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) + * [func (evc *EventCache) Flush()](#EventCache.Flush) +* [type EventCallback](#EventCallback) +* [type EventData](#EventData) +* [type EventSwitch](#EventSwitch) + * [func NewEventSwitch() EventSwitch](#NewEventSwitch) +* [type Eventable](#Eventable) +* [type Fireable](#Fireable) + + +#### Package files +[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) + + + + + + +## type [EventCache](/src/target/event_cache.go?s=116:179#L5) +``` go +type EventCache struct { + // contains filtered or unexported fields +} +``` +An EventCache buffers events for a Fireable +All events are cached. Filtering happens on Flush + + + + + + + +### func [NewEventCache](/src/target/event_cache.go?s=239:284#L11) +``` go +func NewEventCache(evsw Fireable) *EventCache +``` +Create a new EventCache with an EventSwitch as backend + + + + + +### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24) +``` go +func (evc *EventCache) FireEvent(event string, data EventData) +``` +Cache an event to be fired upon finality. + + + + +### func (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31) +``` go +func (evc *EventCache) Flush() +``` +Fire events by running evsw.FireEvent on all cached events. Blocks. 
+Clears cached events + + + + +## type [EventCallback](/src/target/events.go?s=4201:4240#L185) +``` go +type EventCallback func(data EventData) +``` + + + + + + + + + +## type [EventData](/src/target/events.go?s=243:294#L14) +``` go +type EventData interface { +} +``` +Generic event data can be typed and registered with tendermint/go-amino +via concrete implementation of this interface + + + + + + + + + + +## type [EventSwitch](/src/target/events.go?s=560:771#L29) +``` go +type EventSwitch interface { + cmn.Service + Fireable + + AddListenerForEvent(listenerID, event string, cb EventCallback) + RemoveListenerForEvent(event string, listenerID string) + RemoveListener(listenerID string) +} +``` + + + + + + +### func [NewEventSwitch](/src/target/events.go?s=917:950#L46) +``` go +func NewEventSwitch() EventSwitch +``` + + + + +## type [Eventable](/src/target/events.go?s=378:440#L20) +``` go +type Eventable interface { + SetEventSwitch(evsw EventSwitch) +} +``` +reactors and other modules should export +this interface to become eventable + + + + + + + + + + +## type [Fireable](/src/target/events.go?s=490:558#L25) +``` go +type Fireable interface { + FireEvent(event string, data EventData) +} +``` +an event switch or cache implements fireable + + + + + + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/chains/tendermint_34/libs/events/event_cache.go b/chains/tendermint_34/libs/events/event_cache.go new file mode 100755 index 0000000..f508e87 --- /dev/null +++ b/chains/tendermint_34/libs/events/event_cache.go @@ -0,0 +1,37 @@ +package events + +// An EventCache buffers events for a Fireable +// All events are cached. 
Filtering happens on Flush +type EventCache struct { + evsw Fireable + events []eventInfo +} + +// Create a new EventCache with an EventSwitch as backend +func NewEventCache(evsw Fireable) *EventCache { + return &EventCache{ + evsw: evsw, + } +} + +// a cached event +type eventInfo struct { + event string + data EventData +} + +// Cache an event to be fired upon finality. +func (evc *EventCache) FireEvent(event string, data EventData) { + // append to list (go will grow our backing array exponentially) + evc.events = append(evc.events, eventInfo{event, data}) +} + +// Fire events by running evsw.FireEvent on all cached events. Blocks. +// Clears cached events +func (evc *EventCache) Flush() { + for _, ei := range evc.events { + evc.evsw.FireEvent(ei.event, ei.data) + } + // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation + evc.events = nil +} diff --git a/chains/tendermint_34/libs/events/event_cache_test.go b/chains/tendermint_34/libs/events/event_cache_test.go new file mode 100755 index 0000000..ab321da --- /dev/null +++ b/chains/tendermint_34/libs/events/event_cache_test.go @@ -0,0 +1,35 @@ +package events + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEventCache_Flush(t *testing.T) { + evsw := NewEventSwitch() + evsw.Start() + evsw.AddListenerForEvent("nothingness", "", func(data EventData) { + // Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache + require.FailNow(t, "We should never receive a message on this switch since none are fired") + }) + evc := NewEventCache(evsw) + evc.Flush() + // Check after reset + evc.Flush() + fail := true + pass := false + evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { + if fail { + require.FailNow(t, "Shouldn't see a message until flushed") + } + pass = true + }) + evc.FireEvent("something", struct{ int }{1}) + 
evc.FireEvent("something", struct{ int }{2}) + evc.FireEvent("something", struct{ int }{3}) + fail = false + evc.Flush() + assert.True(t, pass) +} diff --git a/chains/tendermint_34/libs/events/events.go b/chains/tendermint_34/libs/events/events.go new file mode 100755 index 0000000..fb90bbe --- /dev/null +++ b/chains/tendermint_34/libs/events/events.go @@ -0,0 +1,247 @@ +// Package events - Pub-Sub in go with event caching +package events + +import ( + "fmt" + "sync" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +// ErrListenerWasRemoved is returned by AddEvent if the listener was removed. +type ErrListenerWasRemoved struct { + listenerID string +} + +// Error implements the error interface. +func (e ErrListenerWasRemoved) Error() string { + return fmt.Sprintf("listener #%s was removed", e.listenerID) +} + +// EventData is a generic event data can be typed and registered with +// tendermint/go-amino via concrete implementation of this interface. +type EventData interface{} + +// Eventable is the interface reactors and other modules must export to become +// eventable. +type Eventable interface { + SetEventSwitch(evsw EventSwitch) +} + +// Fireable is the interface that wraps the FireEvent method. +// +// FireEvent fires an event with the given name and data. +type Fireable interface { + FireEvent(event string, data EventData) +} + +// EventSwitch is the interface for synchronous pubsub, where listeners +// subscribe to certain events and, when an event is fired (see Fireable), +// notified via a callback function. +// +// Listeners are added by calling AddListenerForEvent function. +// They can be removed by calling either RemoveListenerForEvent or +// RemoveListener (for all events). 
+type EventSwitch interface { + cmn.Service + Fireable + + AddListenerForEvent(listenerID, event string, cb EventCallback) error + RemoveListenerForEvent(event string, listenerID string) + RemoveListener(listenerID string) +} + +type eventSwitch struct { + cmn.BaseService + + mtx sync.RWMutex + eventCells map[string]*eventCell + listeners map[string]*eventListener +} + +func NewEventSwitch() EventSwitch { + evsw := &eventSwitch{ + eventCells: make(map[string]*eventCell), + listeners: make(map[string]*eventListener), + } + evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw) + return evsw +} + +func (evsw *eventSwitch) OnStart() error { + return nil +} + +func (evsw *eventSwitch) OnStop() {} + +func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) error { + // Get/Create eventCell and listener. + evsw.mtx.Lock() + eventCell := evsw.eventCells[event] + if eventCell == nil { + eventCell = newEventCell() + evsw.eventCells[event] = eventCell + } + listener := evsw.listeners[listenerID] + if listener == nil { + listener = newEventListener(listenerID) + evsw.listeners[listenerID] = listener + } + evsw.mtx.Unlock() + + // Add event and listener. + if err := listener.AddEvent(event); err != nil { + return err + } + eventCell.AddListener(listenerID, cb) + + return nil +} + +func (evsw *eventSwitch) RemoveListener(listenerID string) { + // Get and remove listener. + evsw.mtx.RLock() + listener := evsw.listeners[listenerID] + evsw.mtx.RUnlock() + if listener == nil { + return + } + + evsw.mtx.Lock() + delete(evsw.listeners, listenerID) + evsw.mtx.Unlock() + + // Remove callback for each event. 
+ listener.SetRemoved() + for _, event := range listener.GetEvents() { + evsw.RemoveListenerForEvent(event, listenerID) + } +} + +func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) { + // Get eventCell + evsw.mtx.Lock() + eventCell := evsw.eventCells[event] + evsw.mtx.Unlock() + + if eventCell == nil { + return + } + + // Remove listenerID from eventCell + numListeners := eventCell.RemoveListener(listenerID) + + // Maybe garbage collect eventCell. + if numListeners == 0 { + // Lock again and double check. + evsw.mtx.Lock() // OUTER LOCK + eventCell.mtx.Lock() // INNER LOCK + if len(eventCell.listeners) == 0 { + delete(evsw.eventCells, event) + } + eventCell.mtx.Unlock() // INNER LOCK + evsw.mtx.Unlock() // OUTER LOCK + } +} + +func (evsw *eventSwitch) FireEvent(event string, data EventData) { + // Get the eventCell + evsw.mtx.RLock() + eventCell := evsw.eventCells[event] + evsw.mtx.RUnlock() + + if eventCell == nil { + return + } + + // Fire event for all listeners in eventCell + eventCell.FireEvent(data) +} + +//----------------------------------------------------------------------------- + +// eventCell handles keeping track of listener callbacks for a given event. 
+type eventCell struct { + mtx sync.RWMutex + listeners map[string]EventCallback +} + +func newEventCell() *eventCell { + return &eventCell{ + listeners: make(map[string]EventCallback), + } +} + +func (cell *eventCell) AddListener(listenerID string, cb EventCallback) { + cell.mtx.Lock() + cell.listeners[listenerID] = cb + cell.mtx.Unlock() +} + +func (cell *eventCell) RemoveListener(listenerID string) int { + cell.mtx.Lock() + delete(cell.listeners, listenerID) + numListeners := len(cell.listeners) + cell.mtx.Unlock() + return numListeners +} + +func (cell *eventCell) FireEvent(data EventData) { + cell.mtx.RLock() + var eventCallbacks []EventCallback + for _, cb := range cell.listeners { + eventCallbacks = append(eventCallbacks, cb) + } + cell.mtx.RUnlock() + + for _, cb := range eventCallbacks { + cb(data) + } +} + +//----------------------------------------------------------------------------- + +type EventCallback func(data EventData) + +type eventListener struct { + id string + + mtx sync.RWMutex + removed bool + events []string +} + +func newEventListener(id string) *eventListener { + return &eventListener{ + id: id, + removed: false, + events: nil, + } +} + +func (evl *eventListener) AddEvent(event string) error { + evl.mtx.Lock() + + if evl.removed { + evl.mtx.Unlock() + return ErrListenerWasRemoved{listenerID: evl.id} + } + + evl.events = append(evl.events, event) + evl.mtx.Unlock() + return nil +} + +func (evl *eventListener) GetEvents() []string { + evl.mtx.RLock() + events := make([]string, len(evl.events)) + copy(events, evl.events) + evl.mtx.RUnlock() + return events +} + +func (evl *eventListener) SetRemoved() { + evl.mtx.Lock() + evl.removed = true + evl.mtx.Unlock() +} diff --git a/chains/tendermint_34/libs/events/events_test.go b/chains/tendermint_34/libs/events/events_test.go new file mode 100755 index 0000000..8d87986 --- /dev/null +++ b/chains/tendermint_34/libs/events/events_test.go @@ -0,0 +1,430 @@ +package events + +import ( + "fmt" + 
"testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single +// listener to an event, and sends a string "data". +func TestAddListenerForEventFireOnce(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + messages := make(chan EventData) + evsw.AddListenerForEvent("listener", "event", + func(data EventData) { + // test there's no deadlock if we remove the listener inside a callback + evsw.RemoveListener("listener") + messages <- data + }) + go evsw.FireEvent("event", "data") + received := <-messages + if received != "data" { + t.Errorf("Message received does not match: %v", received) + } +} + +// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single +// listener to an event, and sends a thousand integers. +func TestAddListenerForEventFireMany(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + doneSum := make(chan uint64) + doneSending := make(chan uint64) + numbers := make(chan uint64, 4) + // subscribe one listener for one event + evsw.AddListenerForEvent("listener", "event", + func(data EventData) { + numbers <- data.(uint64) + }) + // collect received events + go sumReceivedNumbers(numbers, doneSum) + // go fire events + go fireEvents(evsw, "event", doneSending, uint64(1)) + checkSum := <-doneSending + close(numbers) + eventSum := <-doneSum + if checkSum != eventSum { + t.Errorf("Not all messages sent were received.\n") + } +} + +// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single +// listener to three different events and sends a thousand integers for each +// of the three events. 
+func TestAddListenerForDifferentEvents(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + doneSum := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers := make(chan uint64, 4) + // subscribe one listener to three events + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + numbers <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + numbers <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event3", + func(data EventData) { + numbers <- data.(uint64) + }) + // collect received events + go sumReceivedNumbers(numbers, doneSum) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(evsw, "event2", doneSending2, uint64(1)) + go fireEvents(evsw, "event3", doneSending3, uint64(1)) + var checkSum uint64 = 0 + checkSum += <-doneSending1 + checkSum += <-doneSending2 + checkSum += <-doneSending3 + close(numbers) + eventSum := <-doneSum + if checkSum != eventSum { + t.Errorf("Not all messages sent were received.\n") + } +} + +// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch, +// subscribes a first listener to three events, and subscribes a second +// listener to two of those three events, and then sends a thousand integers +// for each of the three events. 
+func TestAddDifferentListenerForDifferentEvents(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for listener1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for listener2 + go sumReceivedNumbers(numbers2, doneSum2) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + checkSumEvent1 := <-doneSending1 + checkSumEvent2 := <-doneSending2 + checkSumEvent3 := <-doneSending3 + checkSum1 := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 + checkSum2 := checkSumEvent2 + checkSumEvent3 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSum1 != eventSum1 || + checkSum2 != eventSum2 { + t.Errorf("Not all messages sent were received for different listeners to different events.\n") + } +} + +func TestAddAndRemoveListenerConcurrency(t *testing.T) { + var ( + stopInputEvent = false + roundCount = 2000 + ) + + evsw := NewEventSwitch() + err := evsw.Start() + 
require.NoError(t, err) + defer evsw.Stop() + + done1 := make(chan struct{}) + done2 := make(chan struct{}) + + // Must be executed concurrently to uncover the data race. + // 1. RemoveListener + go func() { + for i := 0; i < roundCount; i++ { + evsw.RemoveListener("listener") + } + close(done1) + }() + + // 2. AddListenerForEvent + go func() { + for i := 0; i < roundCount; i++ { + index := i + evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), + func(data EventData) { + t.Errorf("should not run callback for %d.\n", index) + stopInputEvent = true + }) + } + close(done2) + }() + + <-done1 + <-done2 + + evsw.RemoveListener("listener") // remove the last listener + + for i := 0; i < roundCount && !stopInputEvent; i++ { + evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) + } +} + +// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to +// two events, fires a thousand integers for the first event, then unsubscribes +// the listener and fires a thousand integers for the second event. 
+func TestAddAndRemoveListener(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for event1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for event2 + go sumReceivedNumbers(numbers2, doneSum2) + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + checkSumEvent1 := <-doneSending1 + // after sending all event1, unsubscribe for all events + evsw.RemoveListener("listener") + go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + checkSumEvent2 := <-doneSending2 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSumEvent1 != eventSum1 || + // correct value asserted by preceding tests, suffices to be non-zero + checkSumEvent2 == uint64(0) || + eventSum2 != uint64(0) { + t.Errorf("Not all messages sent were received or unsubscription did not register.\n") + } +} + +// TestRemoveListener does basic tests on adding and removing +func TestRemoveListener(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + count := 10 + sum1, sum2 := 0, 0 + // add some listeners and make sure they work + evsw.AddListenerForEvent("listener", "event1", + func(data EventData) { + sum1++ + }) + evsw.AddListenerForEvent("listener", "event2", + func(data EventData) { + sum2++ + }) + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + 
assert.Equal(t, count, sum1) + assert.Equal(t, count, sum2) + + // remove one by event and make sure it is gone + evsw.RemoveListenerForEvent("event2", "listener") + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count*2, sum1) + assert.Equal(t, count, sum2) + + // remove the listener entirely and make sure both gone + evsw.RemoveListener("listener") + for i := 0; i < count; i++ { + evsw.FireEvent("event1", true) + evsw.FireEvent("event2", true) + } + assert.Equal(t, count*2, sum1) + assert.Equal(t, count, sum2) +} + +// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two +// listeners to three events, and fires a thousand integers for each event. +// These two listeners serve as the baseline validation while other listeners +// are randomly subscribed and unsubscribed. +// More precisely it randomly subscribes new listeners (different from the first +// two listeners) to one of these three events. At the same time it starts +// randomly unsubscribing these additional listeners from all events they are +// at that point subscribed to. +// NOTE: it is important to run this test with race conditions tracking on, +// `go test -race`, to examine for possible race conditions. 
+func TestRemoveListenersAsync(t *testing.T) { + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + doneSum1 := make(chan uint64) + doneSum2 := make(chan uint64) + doneSending1 := make(chan uint64) + doneSending2 := make(chan uint64) + doneSending3 := make(chan uint64) + numbers1 := make(chan uint64, 4) + numbers2 := make(chan uint64, 4) + // subscribe two listener to three events + evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) { + numbers1 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event1", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) { + numbers2 <- data.(uint64) + }) + evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) { + numbers2 <- data.(uint64) + }) + // collect received events for event1 + go sumReceivedNumbers(numbers1, doneSum1) + // collect received events for event2 + go sumReceivedNumbers(numbers2, doneSum2) + addListenersStress := func() { + r1 := cmn.NewRand() + r1.Seed(time.Now().UnixNano()) + for k := uint16(0); k < 400; k++ { + listenerNumber := r1.Intn(100) + 3 + eventNumber := r1.Intn(3) + 1 + go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), + fmt.Sprintf("event%v", eventNumber), + func(_ EventData) {}) + } + } + removeListenersStress := func() { + r2 := cmn.NewRand() + r2.Seed(time.Now().UnixNano()) + for k := uint16(0); k < 80; k++ { + listenerNumber := r2.Intn(100) + 3 + go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber)) + } + } + addListenersStress() + // go fire events + go fireEvents(evsw, "event1", doneSending1, uint64(1)) + removeListenersStress() + go fireEvents(evsw, "event2", doneSending2, 
uint64(1001)) + go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + checkSumEvent1 := <-doneSending1 + checkSumEvent2 := <-doneSending2 + checkSumEvent3 := <-doneSending3 + checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 + close(numbers1) + close(numbers2) + eventSum1 := <-doneSum1 + eventSum2 := <-doneSum2 + if checkSum != eventSum1 || + checkSum != eventSum2 { + t.Errorf("Not all messages sent were received.\n") + } +} + +//------------------------------------------------------------------------------ +// Helper functions + +// sumReceivedNumbers takes two channels and adds all numbers received +// until the receiving channel `numbers` is closed; it then sends the sum +// on `doneSum` and closes that channel. Expected to be run in a go-routine. +func sumReceivedNumbers(numbers, doneSum chan uint64) { + var sum uint64 + for { + j, more := <-numbers + sum += j + if !more { + doneSum <- sum + close(doneSum) + return + } + } +} + +// fireEvents takes an EventSwitch and fires a thousand integers under +// a given `event` with the integers mootonically increasing from `offset` +// to `offset` + 999. It additionally returns the addition of all integers +// sent on `doneChan` for assertion that all events have been sent, and enabling +// the test to assert all events have also been received. 
+func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, + offset uint64) { + var sentSum uint64 + for i := offset; i <= offset+uint64(999); i++ { + sentSum += i + evsw.FireEvent(event, i) + } + doneChan <- sentSum + close(doneChan) +} diff --git a/chains/tendermint_34/libs/fail/fail.go b/chains/tendermint_34/libs/fail/fail.go new file mode 100755 index 0000000..d7912af --- /dev/null +++ b/chains/tendermint_34/libs/fail/fail.go @@ -0,0 +1,79 @@ +package fail + +import ( + "fmt" + "math/rand" + "os" + "strconv" +) + +var callIndexToFail int + +func init() { + callIndexToFailS := os.Getenv("FAIL_TEST_INDEX") + + if callIndexToFailS == "" { + callIndexToFail = -1 + } else { + var err error + callIndexToFail, err = strconv.Atoi(callIndexToFailS) + if err != nil { + callIndexToFail = -1 + } + } +} + +// Fail when FAIL_TEST_INDEX == callIndex +var ( + callIndex int //indexes Fail calls + + callRandIndex int // indexes a run of FailRand calls + callRandIndexToFail = -1 // the callRandIndex to fail on in FailRand +) + +func Fail() { + if callIndexToFail < 0 { + return + } + + if callIndex == callIndexToFail { + Exit() + } + + callIndex += 1 +} + +// FailRand should be called n successive times. 
+// It will fail on a random one of those calls +// n must be greater than 0 +func FailRand(n int) { + if callIndexToFail < 0 { + return + } + + if callRandIndexToFail < 0 { + // first call in the loop, pick a random index to fail at + callRandIndexToFail = rand.Intn(n) + callRandIndex = 0 + } + + if callIndex == callIndexToFail { + if callRandIndex == callRandIndexToFail { + Exit() + } + } + + callRandIndex += 1 + + if callRandIndex == n { + callIndex += 1 + } +} + +func Exit() { + fmt.Printf("*** fail-test %d ***\n", callIndex) + os.Exit(1) + // proc, _ := os.FindProcess(os.Getpid()) + // proc.Signal(os.Interrupt) + // panic(fmt.Sprintf("*** fail-test %d ***", callIndex)) +} diff --git a/chains/tendermint_34/libs/flowrate/README.md b/chains/tendermint_34/libs/flowrate/README.md new file mode 100755 index 0000000..db42809 --- /dev/null +++ b/chains/tendermint_34/libs/flowrate/README.md @@ -0,0 +1,10 @@ +Data Flow Rate Control +====================== + +To download and install this package run: + +go get github.com/mxk/go-flowrate/flowrate + +The documentation is available at: + +http://godoc.org/github.com/mxk/go-flowrate/flowrate diff --git a/chains/tendermint_34/libs/flowrate/flowrate.go b/chains/tendermint_34/libs/flowrate/flowrate.go new file mode 100755 index 0000000..e233eae --- /dev/null +++ b/chains/tendermint_34/libs/flowrate/flowrate.go @@ -0,0 +1,275 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. 
+type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. 
+func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// Hack to set the current rEMA. +func (m *Monitor) SetREMA(rEMA float64) { + m.mu.Lock() + m.rEMA = rEMA + m.samples++ + m.mu.Unlock() +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. 
The returned value
// becomes static after a call to Done.
func (m *Monitor) Status() Status {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Flush the pending sample so the snapshot reflects the present moment.
	cur := m.update(0)
	st := Status{
		Active:   m.active,
		Start:    clockToTime(m.start),
		Duration: m.sLast - m.start,
		Idle:     cur - m.tLast,
		Bytes:    m.bytes,
		Samples:  m.samples,
		PeakRate: round(m.rPeak),
		BytesRem: m.tBytes - m.bytes,
		Progress: percentOf(float64(m.bytes), float64(m.tBytes)),
	}
	if st.BytesRem < 0 {
		// More bytes were transferred than announced via SetTransferSize.
		st.BytesRem = 0
	}
	if st.Duration <= 0 {
		// Nothing has been sampled yet; all rate fields stay zero.
		return st
	}
	avg := float64(st.Bytes) / st.Duration.Seconds()
	st.AvgRate = round(avg)
	if !st.Active {
		// After Done() the instantaneous and current rates read as zero.
		return st
	}
	st.InstRate = round(m.rSample)
	st.CurRate = round(m.rEMA)
	if st.BytesRem > 0 {
		// Blend the EMA with the lifetime average to estimate completion,
		// capping the estimate at timeRemLimit.
		blend := 0.8*m.rEMA + 0.2*avg
		if blend > 0 {
			nanos := float64(st.BytesRem) / blend * 1e9
			if nanos > float64(timeRemLimit) {
				nanos = float64(timeRemLimit)
			}
			st.TimeRem = clockRound(time.Duration(nanos))
		}
	}
	return st
}

// Limit restricts the instantaneous (per-sample) data flow to rate bytes per
// second. It returns the maximum number of bytes (0 <= n <= want) that may be
// transferred immediately without exceeding the limit. If block == true, the
// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1,
// or the transfer is inactive (after a call to Done).
//
// At least one byte is always allowed to be transferred in any given sampling
// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate
// is 10 bytes per second.
//
// For usage examples, see the implementation of Reader and Writer in io.go.
+func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. +func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. 
+func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/chains/tendermint_34/libs/flowrate/io.go b/chains/tendermint_34/libs/flowrate/io.go new file mode 100755 index 0000000..fbe0909 --- /dev/null +++ b/chains/tendermint_34/libs/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. 
+func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. 
It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. +func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/chains/tendermint_34/libs/flowrate/io_test.go b/chains/tendermint_34/libs/flowrate/io_test.go new file mode 100755 index 0000000..ab2c712 --- /dev/null +++ b/chains/tendermint_34/libs/flowrate/io_test.go @@ -0,0 +1,194 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "bytes" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := 
make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + {true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0}, + {true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0}, + {true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0}, + {false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + {false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + 
t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +func TestWriter(t *testing.T) { + b := make([]byte, 100) + for i := range b { + b[i] = byte(i) + } + w := NewWriter(&bytes.Buffer{}, 200) + start := time.Now() + + // Make sure w implements Limiter + _ = Limiter(w) + + // Non-blocking 20-byte write for the first sample returns ErrLimit + w.SetBlocking(false) + if n, err := w.Write(b); n != 20 || err != ErrLimit { + t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + + // Blocking 80-byte write + w.SetBlocking(true) + if n, err := w.Write(b[20:]); n != 80 || err != nil { + t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _300ms { + // Explanation for `rt < _300ms` (as opposed to `< _400ms`) + // + // |<-- start | | + // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms + // sends: 20|20 |20 |20 |20# + // + // NOTE: The '#' symbol can thus happen before 400ms is up. + // Thus, we can only panic if rt < _300ms. 
+ t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) + } + + w.SetTransferSize(100) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + {true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000}, + {true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, + } + for i, s := range status { + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) + } + } + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } +} + +const maxDeviationForDuration = 50 * time.Millisecond +const maxDeviationForRate int64 = 50 + +// statusesAreEqual returns true if s1 is equal to s2. Equality here means +// general equality of fields except for the duration and rates, which can +// drift due to unpredictable delays (e.g. thread wakes up 25ms after +// `time.Sleep` has ended). 
+func statusesAreEqual(s1 *Status, s2 *Status) bool { + if s1.Active == s2.Active && + s1.Start == s2.Start && + durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && + s1.Idle == s2.Idle && + s1.Bytes == s2.Bytes && + s1.Samples == s2.Samples && + ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && + ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && + ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && + ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && + s1.BytesRem == s2.BytesRem && + durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && + s1.Progress == s2.Progress { + return true + } + return false +} + +func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { + return d2-d1 <= maxDeviation +} + +func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { + sub := r1 - r2 + if sub < 0 { + sub = -sub + } + if sub <= maxDeviation { + return true + } + return false +} diff --git a/chains/tendermint_34/libs/flowrate/util.go b/chains/tendermint_34/libs/flowrate/util.go new file mode 100755 index 0000000..b33ddc7 --- /dev/null +++ b/chains/tendermint_34/libs/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Now().Round(clockRate) + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Now().Round(clockRate).Sub(czero) +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. +func clockToTime(c time.Duration) time.Time { + return czero.Add(c) +} + +// clockRound returns d rounded to the nearest clockRate increment. 
+func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). +func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' + return string(append(b, '%')) +} diff --git a/chains/tendermint_34/libs/log/filter.go b/chains/tendermint_34/libs/log/filter.go new file mode 100755 index 0000000..b71447e --- /dev/null +++ b/chains/tendermint_34/libs/log/filter.go @@ -0,0 +1,191 @@ +package log + +import "fmt" + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelError +) + +type filter struct { + next Logger + allowed level // XOR'd levels for default case + initiallyAllowed level // XOR'd levels for initial case + allowedKeyvals map[keyval]level // When key-value match, use this level +} + +type keyval struct { + key interface{} + value interface{} +} + +// NewFilter wraps next and implements filtering. See the commentary on the +// Option functions for a detailed description of how to configure levels. If +// no options are provided, all leveled log events created with Debug, Info or +// Error helper methods are squelched. 
+func NewFilter(next Logger, options ...Option) Logger { + l := &filter{ + next: next, + allowedKeyvals: make(map[keyval]level), + } + for _, option := range options { + option(l) + } + l.initiallyAllowed = l.allowed + return l +} + +func (l *filter) Info(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelInfo != 0 + if !levelAllowed { + return + } + l.next.Info(msg, keyvals...) +} + +func (l *filter) Debug(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelDebug != 0 + if !levelAllowed { + return + } + l.next.Debug(msg, keyvals...) +} + +func (l *filter) Error(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelError != 0 + if !levelAllowed { + return + } + l.next.Error(msg, keyvals...) +} + +// With implements Logger by constructing a new filter with a keyvals appended +// to the logger. +// +// If custom level was set for a keyval pair using one of the +// Allow*With methods, it is used as the logger's level. +// +// Examples: +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) +// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" +// +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil +// +// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) +// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... 
Hello module=crypto user=Sam" +func (l *filter) With(keyvals ...interface{}) Logger { + keyInAllowedKeyvals := false + + for i := len(keyvals) - 2; i >= 0; i -= 2 { + for kv, allowed := range l.allowedKeyvals { + if keyvals[i] == kv.key { + keyInAllowedKeyvals = true + // Example: + // logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) + // logger.With("module", "crypto") + if keyvals[i+1] == kv.value { + return &filter{ + next: l.next.With(keyvals...), + allowed: allowed, // set the desired level + allowedKeyvals: l.allowedKeyvals, + initiallyAllowed: l.initiallyAllowed, + } + } + } + } + } + + // Example: + // logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) + // logger.With("module", "main") + if keyInAllowedKeyvals { + return &filter{ + next: l.next.With(keyvals...), + allowed: l.initiallyAllowed, // return back to initially allowed + allowedKeyvals: l.allowedKeyvals, + initiallyAllowed: l.initiallyAllowed, + } + } + + return &filter{ + next: l.next.With(keyvals...), + allowed: l.allowed, // simply continue with the current level + allowedKeyvals: l.allowedKeyvals, + initiallyAllowed: l.initiallyAllowed, + } +} + +//-------------------------------------------------------------------------------- + +// Option sets a parameter for the filter. +type Option func(*filter) + +// AllowLevel returns an option for the given level or error if no option exist +// for such level. +func AllowLevel(lvl string) (Option, error) { + switch lvl { + case "debug": + return AllowDebug(), nil + case "info": + return AllowInfo(), nil + case "error": + return AllowError(), nil + case "none": + return AllowNone(), nil + default: + return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl) + } +} + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, info and debug level log events to pass. 
+func AllowDebug() Option { + return allowed(levelError | levelInfo | levelDebug) +} + +// AllowInfo allows error and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelInfo) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *filter) { l.allowed = allowed } +} + +// AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair. +func AllowDebugWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } +} + +// AllowInfoWith allows error and info level log events to pass for a specific key value pair. +func AllowInfoWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo } +} + +// AllowErrorWith allows only error level log events to pass for a specific key value pair. +func AllowErrorWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError } +} + +// AllowNoneWith allows no leveled log events to pass for a specific key value pair. 
+func AllowNoneWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } +} diff --git a/chains/tendermint_34/libs/log/filter_test.go b/chains/tendermint_34/libs/log/filter_test.go new file mode 100755 index 0000000..f9957f0 --- /dev/null +++ b/chains/tendermint_34/libs/log/filter_test.go @@ -0,0 +1,118 @@ +package log_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/tendermint/tendermint/libs/log" +) + +func TestVariousLevels(t *testing.T) { + testCases := []struct { + name string + allowed log.Option + want string + }{ + { + "AllowAll", + log.AllowAll(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowDebug", + log.AllowDebug(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowInfo", + log.AllowInfo(), + strings.Join([]string{ + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowError", + log.AllowError(), + strings.Join([]string{ + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowNone", + log.AllowNone(), + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + logger := log.NewFilter(log.NewTMJSONLogger(&buf), tc.allowed) + + logger.Debug("here", "this is", "debug log") + logger.Info("here", "this is", "info log") + logger.Error("here", "this is", "error log") + + if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) + } + }) + } +} + +func TestLevelContext(t *testing.T) { + var 
buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + logger = log.NewFilter(logger, log.AllowError()) + logger = logger.With("context", "value") + + logger.Error("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + logger.Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestVariousAllowWith(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + + logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) + logger1.With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger2 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) + logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger3 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) + logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/chains/tendermint_34/libs/log/logger.go b/chains/tendermint_34/libs/log/logger.go new file mode 100755 index 0000000..ddb187b --- /dev/null +++ b/chains/tendermint_34/libs/log/logger.go @@ -0,0 +1,30 @@ +package log + 
+import ( + "io" + + kitlog "github.com/go-kit/kit/log" +) + +// Logger is what any Tendermint library should take. +type Logger interface { + Debug(msg string, keyvals ...interface{}) + Info(msg string, keyvals ...interface{}) + Error(msg string, keyvals ...interface{}) + + With(keyvals ...interface{}) Logger +} + +// NewSyncWriter returns a new writer that is safe for concurrent use by +// multiple goroutines. Writes to the returned writer are passed on to w. If +// another write is already in progress, the calling goroutine blocks until +// the writer is available. +// +// If w implements the following interface, so does the returned writer. +// +// interface { +// Fd() uintptr +// } +func NewSyncWriter(w io.Writer) io.Writer { + return kitlog.NewSyncWriter(w) +} diff --git a/chains/tendermint_34/libs/log/nop_logger.go b/chains/tendermint_34/libs/log/nop_logger.go new file mode 100755 index 0000000..12d75ab --- /dev/null +++ b/chains/tendermint_34/libs/log/nop_logger.go @@ -0,0 +1,17 @@ +package log + +type nopLogger struct{} + +// Interface assertions +var _ Logger = (*nopLogger)(nil) + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return &nopLogger{} } + +func (nopLogger) Info(string, ...interface{}) {} +func (nopLogger) Debug(string, ...interface{}) {} +func (nopLogger) Error(string, ...interface{}) {} + +func (l *nopLogger) With(...interface{}) Logger { + return l +} diff --git a/chains/tendermint_34/libs/log/testing_logger.go b/chains/tendermint_34/libs/log/testing_logger.go new file mode 100755 index 0000000..8914bd8 --- /dev/null +++ b/chains/tendermint_34/libs/log/testing_logger.go @@ -0,0 +1,60 @@ +package log + +import ( + "io" + "os" + "testing" + + "github.com/go-kit/kit/log/term" +) + +var ( + // reuse the same logger across all tests + _testingLogger Logger +) + +// TestingLogger returns a TMLogger which writes to STDOUT if testing being run +// with the verbose (-v) flag, NopLogger otherwise. 
+// +// Note that the call to TestingLogger() must be made +// inside a test (not in the init func) because +// verbose flag only set at the time of testing. +func TestingLogger() Logger { + return TestingLoggerWithOutput(os.Stdout) +} + +// TestingLoggerWithOutput returns a TMLogger which writes to (w io.Writer) if testing being run +// with the verbose (-v) flag, NopLogger otherwise. +// +// Note that the call to TestingLoggerWithOutput(w io.Writer) must be made +// inside a test (not in the init func) because +// verbose flag only set at the time of testing. +func TestingLoggerWithOutput(w io.Writer) Logger { + if _testingLogger != nil { + return _testingLogger + } + + if testing.Verbose() { + _testingLogger = NewTMLogger(NewSyncWriter(w)) + } else { + _testingLogger = NewNopLogger() + } + + return _testingLogger +} + +// TestingLoggerWithColorFn allows you to provide your own color function. See +// TestingLogger for documentation. +func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + if _testingLogger != nil { + return _testingLogger + } + + if testing.Verbose() { + _testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn) + } else { + _testingLogger = NewNopLogger() + } + + return _testingLogger +} diff --git a/chains/tendermint_34/libs/log/tm_json_logger.go b/chains/tendermint_34/libs/log/tm_json_logger.go new file mode 100755 index 0000000..a71ac10 --- /dev/null +++ b/chains/tendermint_34/libs/log/tm_json_logger.go @@ -0,0 +1,15 @@ +package log + +import ( + "io" + + kitlog "github.com/go-kit/kit/log" +) + +// NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. 
+func NewTMJSONLogger(w io.Writer) Logger { + return &tmLogger{kitlog.NewJSONLogger(w)} +} diff --git a/chains/tendermint_34/libs/log/tm_logger.go b/chains/tendermint_34/libs/log/tm_logger.go new file mode 100755 index 0000000..d49e8d2 --- /dev/null +++ b/chains/tendermint_34/libs/log/tm_logger.go @@ -0,0 +1,83 @@ +package log + +import ( + "fmt" + "io" + + kitlog "github.com/go-kit/kit/log" + kitlevel "github.com/go-kit/kit/log/level" + "github.com/go-kit/kit/log/term" +) + +const ( + msgKey = "_msg" // "_" prefixed to avoid collisions + moduleKey = "module" +) + +type tmLogger struct { + srcLogger kitlog.Logger +} + +// Interface assertions +var _ Logger = (*tmLogger)(nil) + +// NewTMLogger returns a logger that encodes msg and keyvals to the Writer +// using go-kit's log as an underlying logger and our custom formatter. Note +// that underlying logger could be swapped with something else. +func NewTMLogger(w io.Writer) Logger { + // Color by level value + colorFn := func(keyvals ...interface{}) term.FgBgColor { + if keyvals[0] != kitlevel.Key() { + panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) + } + switch keyvals[1].(kitlevel.Value).String() { + case "debug": + return term.FgBgColor{Fg: term.DarkGray} + case "error": + return term.FgBgColor{Fg: term.Red} + default: + return term.FgBgColor{} + } + } + + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + +// NewTMLoggerWithColorFn allows you to provide your own color function. See +// NewTMLogger for documentation. +func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + +// Info logs a message at level Info. 
+func (l *tmLogger) Info(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Info(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } +} + +// Debug logs a message at level Debug. +func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Debug(l.srcLogger) + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) + } +} + +// Error logs a message at level Error. +func (l *tmLogger) Error(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Error(l.srcLogger) + lWithMsg := kitlog.With(lWithLevel, msgKey, msg) + if err := lWithMsg.Log(keyvals...); err != nil { + lWithMsg.Log("err", err) + } +} + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Info, Debug or Error. 
+func (l *tmLogger) With(keyvals ...interface{}) Logger { + return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} +} diff --git a/chains/tendermint_34/libs/log/tm_logger_test.go b/chains/tendermint_34/libs/log/tm_logger_test.go new file mode 100755 index 0000000..1f890ce --- /dev/null +++ b/chains/tendermint_34/libs/log/tm_logger_test.go @@ -0,0 +1,44 @@ +package log_test + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" + + "github.com/go-logfmt/logfmt" + "github.com/tendermint/tendermint/libs/log" +) + +func TestLoggerLogsItsErrors(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMLogger(&buf) + logger.Info("foo", "baz baz", "bar") + msg := strings.TrimSpace(buf.String()) + if !strings.Contains(msg, logfmt.ErrInvalidKey.Error()) { + t.Errorf("Expected logger msg to contain ErrInvalidKey, got %s", msg) + } +} + +func BenchmarkTMLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) +} + +func BenchmarkTMLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage) +} + +func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + lc := logger.With("common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } + withInfoMessage = func(logger log.Logger) { logger.With("a", "b").Info("c", "d", "f") } +) diff --git a/chains/tendermint_34/libs/log/tmfmt_logger.go b/chains/tendermint_34/libs/log/tmfmt_logger.go new file mode 100755 index 0000000..d841263 --- /dev/null +++ b/chains/tendermint_34/libs/log/tmfmt_logger.go @@ -0,0 +1,127 @@ +package log + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + kitlog "github.com/go-kit/kit/log" + kitlevel "github.com/go-kit/kit/log/level" + "github.com/go-logfmt/logfmt" +) + +type tmfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + 
+func (l *tmfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var tmfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc tmfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type tmfmtLogger struct { + w io.Writer +} + +// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in +// Tendermint custom format. Note complex types (structs, maps, slices) +// formatted as "%+v". +// +// Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewTMFmtLogger(w io.Writer) kitlog.Logger { + return &tmfmtLogger{w} +} + +func (l tmfmtLogger) Log(keyvals ...interface{}) error { + enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) + enc.Reset() + defer tmfmtEncoderPool.Put(enc) + + const unknown = "unknown" + lvl := "none" + msg := unknown + module := unknown + + // indexes of keys to skip while encoding later + excludeIndexes := make([]int, 0) + + for i := 0; i < len(keyvals)-1; i += 2 { + // Extract level + if keyvals[i] == kitlevel.Key() { + excludeIndexes = append(excludeIndexes, i) + switch keyvals[i+1].(type) { + case string: + lvl = keyvals[i+1].(string) + case kitlevel.Value: + lvl = keyvals[i+1].(kitlevel.Value).String() + default: + panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) + } + // and message + } else if keyvals[i] == msgKey { + excludeIndexes = append(excludeIndexes, i) + msg = keyvals[i+1].(string) + // and module (could be multiple keyvals; if such case last keyvalue wins) + } else if keyvals[i] == moduleKey { + excludeIndexes = append(excludeIndexes, i) + module = keyvals[i+1].(string) + } + } + + // Form a custom Tendermint line + // + // Example: + // D[2016-05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // + // Description: + // D - first character of the level, uppercase (ASCII only) + // [2016-05-02|11:06:44.322] 
- our time format (see https://golang.org/src/time/format.go) + // Stopping ... - message + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2006-01-02|15:04:05.000"), msg)) + + if module != unknown { + enc.buf.WriteString("module=" + module + " ") + } + +KeyvalueLoop: + for i := 0; i < len(keyvals)-1; i += 2 { + for _, j := range excludeIndexes { + if i == j { + continue KeyvalueLoop + } + } + + err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) + if err == logfmt.ErrUnsupportedValueType { + enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) + } else if err != nil { + return err + } + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. + if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/chains/tendermint_34/libs/log/tmfmt_logger_test.go b/chains/tendermint_34/libs/log/tmfmt_logger_test.go new file mode 100755 index 0000000..d6f039c --- /dev/null +++ b/chains/tendermint_34/libs/log/tmfmt_logger_test.go @@ -0,0 +1,118 @@ +package log_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "regexp" + "testing" + + kitlog "github.com/go-kit/kit/log" + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTMFmtLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewTMFmtLogger(buf) + + if err := logger.Log("hello", "world"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) + + 
buf.Reset() + if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("level", "error"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("_msg", "Hello"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) +} + +func BenchmarkTMFmtLoggerSimple(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage) +} + +func BenchmarkTMFmtLoggerContextual(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage) +} + +func TestTMFmtLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000) +} + +func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { + lc := kitlog.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } + withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } +) + +// These tests are designed to be run with the race detector. 
+ +func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { + n := int(math.Sqrt(float64(total))) + share := total / n + + errC := make(chan error, n) + + for i := 0; i < n; i++ { + go func() { + errC <- spam(logger, share) + }() + } + + for i := 0; i < n; i++ { + err := <-errC + if err != nil { + t.Fatalf("concurrent logging error: %v", err) + } + } +} + +func spam(logger kitlog.Logger, count int) error { + for i := 0; i < count; i++ { + err := logger.Log("key", i) + if err != nil { + return err + } + } + return nil +} + +type mymap map[int]int + +func (m mymap) String() string { return "special_behavior" } diff --git a/chains/tendermint_34/libs/log/tracing_logger.go b/chains/tendermint_34/libs/log/tracing_logger.go new file mode 100755 index 0000000..d2a6ff4 --- /dev/null +++ b/chains/tendermint_34/libs/log/tracing_logger.go @@ -0,0 +1,76 @@ +package log + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// NewTracingLogger enables tracing by wrapping all errors (if they +// implement stackTracer interface) in tracedError. +// +// All errors returned by https://github.com/pkg/errors implement stackTracer +// interface. +// +// For debugging purposes only as it doubles the amount of allocations. +func NewTracingLogger(next Logger) Logger { + return &tracingLogger{ + next: next, + } +} + +type stackTracer interface { + error + StackTrace() errors.StackTrace +} + +type tracingLogger struct { + next Logger +} + +func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { + l.next.Info(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { + l.next.Debug(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { + l.next.Error(msg, formatErrors(keyvals)...) 
+} + +func (l *tracingLogger) With(keyvals ...interface{}) Logger { + return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} +} + +func formatErrors(keyvals []interface{}) []interface{} { + newKeyvals := make([]interface{}, len(keyvals)) + copy(newKeyvals, keyvals) + for i := 0; i < len(newKeyvals)-1; i += 2 { + if err, ok := newKeyvals[i+1].(stackTracer); ok { + newKeyvals[i+1] = tracedError{err} + } + } + return newKeyvals +} + +// tracedError wraps a stackTracer and just makes the Error() result +// always return a full stack trace. +type tracedError struct { + wrapped stackTracer +} + +var _ stackTracer = tracedError{} + +func (t tracedError) StackTrace() errors.StackTrace { + return t.wrapped.StackTrace() +} + +func (t tracedError) Cause() error { + return t.wrapped +} + +func (t tracedError) Error() string { + return fmt.Sprintf("%+v", t.wrapped) +} diff --git a/chains/tendermint_34/libs/log/tracing_logger_test.go b/chains/tendermint_34/libs/log/tracing_logger_test.go new file mode 100755 index 0000000..1abc644 --- /dev/null +++ b/chains/tendermint_34/libs/log/tracing_logger_test.go @@ -0,0 +1,41 @@ +package log_test + +import ( + "bytes" + stderr "errors" + "fmt" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/log" +) + +func TestTracingLogger(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLogger(&buf) + + logger1 := log.NewTracingLogger(logger) + err1 := errors.New("Courage is grace under pressure.") + err2 := errors.New("It does not matter how slowly you go, so long as you do not stop.") + logger1.With("err1", err1).Info("foo", "err2", err2) + have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1) + if want := strings.Replace(strings.Replace(`{"_msg":"foo","err1":"`+fmt.Sprintf("%+v", err1)+`","err2":"`+fmt.Sprintf("%+v", err2)+`","level":"info"}`, "\t", "", -1), "\n", "", -1); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", 
want, have) + } + + buf.Reset() + + logger.With("err1", stderr.New("Opportunities don't happen. You create them.")).Info("foo", "err2", stderr.New("Once you choose hope, anything's possible.")) + if want, have := `{"_msg":"foo","err1":"Opportunities don't happen. You create them.","err2":"Once you choose hope, anything's possible.","level":"info"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/chains/tendermint_34/libs/pubsub/example_test.go b/chains/tendermint_34/libs/pubsub/example_test.go new file mode 100755 index 0000000..4e4634d --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/example_test.go @@ -0,0 +1,28 @@ +package pubsub_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestExample(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}, 1) + err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]string{"abci.account.name": "John"})) + require.NoError(t, err) + assertReceive(t, "Tombstone", ch) +} diff --git a/chains/tendermint_34/libs/pubsub/pubsub.go b/chains/tendermint_34/libs/pubsub/pubsub.go new file mode 100755 index 0000000..c81c5dd --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/pubsub.go @@ -0,0 +1,401 @@ +// Package pubsub implements a 
pub-sub model with a single publisher (Server) +// and multiple subscribers (clients). +// +// Though you can have multiple publishers by sharing a pointer to a server or +// by giving the same channel to each publisher and publishing messages from +// that channel (fan-in). +// +// Clients subscribe for messages, which could be of any type, using a query. +// When some message is published, we match it with all queries. If there is a +// match, this message will be pushed to all clients, subscribed to that query. +// See query subpackage for our implementation. +// +// Due to the blocking send implementation, a single subscriber can freeze an +// entire server by not reading messages before it unsubscribes. To avoid such +// scenario, subscribers must either: +// +// a) make sure they continue to read from the out channel until +// Unsubscribe(All) is called +// +// s.Subscribe(ctx, sub, qry, out) +// go func() { +// for msg := range out { +// // handle msg +// // will exit automatically when out is closed by Unsubscribe(All) +// } +// }() +// s.UnsubscribeAll(ctx, sub) +// +// b) drain the out channel before calling Unsubscribe(All) +// +// s.Subscribe(ctx, sub, qry, out) +// defer func() { +// // drain out to make sure we don't block +// LOOP: +// for { +// select { +// case <-out: +// default: +// break LOOP +// } +// } +// s.UnsubscribeAll(ctx, sub) +// }() +// for msg := range out { +// // handle msg +// if err != nil { +// return err +// } +// } +// +package pubsub + +import ( + "context" + "errors" + "sync" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +type operation int + +const ( + sub operation = iota + pub + unsub + shutdown +) + +var ( + // ErrSubscriptionNotFound is returned when a client tries to unsubscribe + // from not existing subscription. + ErrSubscriptionNotFound = errors.New("subscription not found") + + // ErrAlreadySubscribed is returned when a client tries to subscribe twice or + // more using the same query. 
+ ErrAlreadySubscribed = errors.New("already subscribed") +) + +type cmd struct { + op operation + query Query + ch chan<- interface{} + clientID string + msg interface{} + tags TagMap +} + +// Query defines an interface for a query to be used for subscribing. +type Query interface { + Matches(tags TagMap) bool + String() string +} + +// Server allows clients to subscribe/unsubscribe for messages, publishing +// messages with or without tags, and manages internal state. +type Server struct { + cmn.BaseService + + cmds chan cmd + cmdsCap int + + // check if we have subscription before + // subscribing or unsubscribing + mtx sync.RWMutex + subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query +} + +// Option sets a parameter for the server. +type Option func(*Server) + +// TagMap is used to associate tags to a message. +// They can be queried by subscribers to choose messages they will receive. +type TagMap interface { + // Get returns the value for a key, or nil if no value is present. + // The ok result indicates whether value was found in the tags. + Get(key string) (value string, ok bool) + // Len returns the number of tags. + Len() int +} + +type tagMap map[string]string + +var _ TagMap = (*tagMap)(nil) + +// NewTagMap constructs a new immutable tag set from a map. +func NewTagMap(data map[string]string) TagMap { + return tagMap(data) +} + +// Get returns the value for a key, or nil if no value is present. +// The ok result indicates whether value was found in the tags. +func (ts tagMap) Get(key string) (value string, ok bool) { + value, ok = ts[key] + return +} + +// Len returns the number of tags. +func (ts tagMap) Len() int { + return len(ts) +} + +// NewServer returns a new server. See the commentary on the Option functions +// for a detailed description of how to configure buffering. If no options are +// provided, the resulting server's queue is unbuffered. 
+func NewServer(options ...Option) *Server { + s := &Server{ + subscriptions: make(map[string]map[string]Query), + } + s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) + + for _, option := range options { + option(s) + } + + // if BufferCapacity option was not set, the channel is unbuffered + s.cmds = make(chan cmd, s.cmdsCap) + + return s +} + +// BufferCapacity allows you to specify capacity for the internal server's +// queue. Since the server, given Y subscribers, could only process X messages, +// this option could be used to survive spikes (e.g. high amount of +// transactions during peak hours). +func BufferCapacity(cap int) Option { + return func(s *Server) { + if cap > 0 { + s.cmdsCap = cap + } + } +} + +// BufferCapacity returns capacity of the internal server's queue. +func (s *Server) BufferCapacity() int { + return s.cmdsCap +} + +// Subscribe creates a subscription for the given client. It accepts a channel +// on which messages matching the given query can be received. An error will be +// returned to the caller if the context is canceled or if subscription already +// exist for pair clientID and query. +func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error { + s.mtx.RLock() + clientSubscriptions, ok := s.subscriptions[clientID] + if ok { + _, ok = clientSubscriptions[query.String()] + } + s.mtx.RUnlock() + if ok { + return ErrAlreadySubscribed + } + + select { + case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: + s.mtx.Lock() + if _, ok = s.subscriptions[clientID]; !ok { + s.subscriptions[clientID] = make(map[string]Query) + } + // preserve original query + // see Unsubscribe + s.subscriptions[clientID][query.String()] = query + s.mtx.Unlock() + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// Unsubscribe removes the subscription on the given query. 
An error will be +// returned to the caller if the context is canceled or if subscription does +// not exist. +func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { + var origQuery Query + s.mtx.RLock() + clientSubscriptions, ok := s.subscriptions[clientID] + if ok { + origQuery, ok = clientSubscriptions[query.String()] + } + s.mtx.RUnlock() + if !ok { + return ErrSubscriptionNotFound + } + + // original query is used here because we're using pointers as map keys + select { + case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}: + s.mtx.Lock() + delete(clientSubscriptions, query.String()) + s.mtx.Unlock() + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// UnsubscribeAll removes all client subscriptions. An error will be returned +// to the caller if the context is canceled or if subscription does not exist. +func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + s.mtx.RLock() + _, ok := s.subscriptions[clientID] + s.mtx.RUnlock() + if !ok { + return ErrSubscriptionNotFound + } + + select { + case s.cmds <- cmd{op: unsub, clientID: clientID}: + s.mtx.Lock() + delete(s.subscriptions, clientID) + s.mtx.Unlock() + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// Publish publishes the given message. An error will be returned to the caller +// if the context is canceled. +func (s *Server) Publish(ctx context.Context, msg interface{}) error { + return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]string))) +} + +// PublishWithTags publishes the given message with the set of tags. The set is +// matched with clients queries. If there is a match, the message is sent to +// the client. 
+func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error { + select { + case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// OnStop implements Service.OnStop by shutting down the server. +func (s *Server) OnStop() { + s.cmds <- cmd{op: shutdown} +} + +// NOTE: not goroutine safe +type state struct { + // query -> client -> ch + queries map[Query]map[string]chan<- interface{} + // client -> query -> struct{} + clients map[string]map[Query]struct{} +} + +// OnStart implements Service.OnStart by starting the server. +func (s *Server) OnStart() error { + go s.loop(state{ + queries: make(map[Query]map[string]chan<- interface{}), + clients: make(map[string]map[Query]struct{}), + }) + return nil +} + +// OnReset implements Service.OnReset +func (s *Server) OnReset() error { + return nil +} + +func (s *Server) loop(state state) { +loop: + for cmd := range s.cmds { + switch cmd.op { + case unsub: + if cmd.query != nil { + state.remove(cmd.clientID, cmd.query) + } else { + state.removeAll(cmd.clientID) + } + case shutdown: + for clientID := range state.clients { + state.removeAll(clientID) + } + break loop + case sub: + state.add(cmd.clientID, cmd.query, cmd.ch) + case pub: + state.send(cmd.msg, cmd.tags) + } + } +} + +func (state *state) add(clientID string, q Query, ch chan<- interface{}) { + + // initialize clientToChannelMap per query if needed + if _, ok := state.queries[q]; !ok { + state.queries[q] = make(map[string]chan<- interface{}) + } + + // create subscription + state.queries[q][clientID] = ch + + // add client if needed + if _, ok := state.clients[clientID]; !ok { + state.clients[clientID] = make(map[Query]struct{}) + } + state.clients[clientID][q] = struct{}{} +} + +func (state *state) remove(clientID string, q Query) { + clientToChannelMap, ok := state.queries[q] + if !ok { + return + } + + ch, ok := clientToChannelMap[clientID] + 
if ok { + close(ch) + + delete(state.clients[clientID], q) + + // if it not subscribed to anything else, remove the client + if len(state.clients[clientID]) == 0 { + delete(state.clients, clientID) + } + + delete(state.queries[q], clientID) + if len(state.queries[q]) == 0 { + delete(state.queries, q) + } + } +} + +func (state *state) removeAll(clientID string) { + queryMap, ok := state.clients[clientID] + if !ok { + return + } + + for q := range queryMap { + ch := state.queries[q][clientID] + close(ch) + + delete(state.queries[q], clientID) + if len(state.queries[q]) == 0 { + delete(state.queries, q) + } + } + delete(state.clients, clientID) +} + +func (state *state) send(msg interface{}, tags TagMap) { + for q, clientToChannelMap := range state.queries { + if q.Matches(tags) { + for _, ch := range clientToChannelMap { + ch <- msg + } + } + } +} diff --git a/chains/tendermint_34/libs/pubsub/pubsub_test.go b/chains/tendermint_34/libs/pubsub/pubsub_test.go new file mode 100755 index 0000000..5e9931e --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/pubsub_test.go @@ -0,0 +1,253 @@ +package pubsub_test + +import ( + "context" + "fmt" + "runtime/debug" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +const ( + clientID = "test-client" +) + +func TestSubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}, 1) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Publish(ctx, "Ka-Zar") + require.NoError(t, err) + assertReceive(t, "Ka-Zar", ch) + + err = s.Publish(ctx, "Quicksilver") + require.NoError(t, err) + assertReceive(t, "Quicksilver", ch) +} + +func TestDifferentClients(t *testing.T) { + s := 
pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch1 := make(chan interface{}, 1) + err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) + require.NoError(t, err) + assertReceive(t, "Iceman", ch1) + + ch2 := make(chan interface{}, 1) + err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"})) + require.NoError(t, err) + assertReceive(t, "Ultimo", ch1) + assertReceive(t, "Ultimo", ch2) + + ch3 := make(chan interface{}, 1) + err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewRoundStep"})) + require.NoError(t, err) + assert.Zero(t, len(ch3)) +} + +func TestClientSubscribesTwice(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + q := query.MustParse("tm.events.type='NewBlock'") + + ch1 := make(chan interface{}, 1) + err := s.Subscribe(ctx, clientID, q, ch1) + require.NoError(t, err) + err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) + require.NoError(t, err) + assertReceive(t, "Goblin Queen", ch1) + + ch2 := make(chan interface{}, 1) + err = s.Subscribe(ctx, clientID, q, ch2) + require.Error(t, err) + + err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) + require.NoError(t, err) + 
assertReceive(t, "Spider-Man", ch1) +} + +func TestUnsubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) + require.NoError(t, err) + + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") + + _, ok := <-ch + assert.False(t, ok) +} + +func TestResubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch := make(chan interface{}) + err := s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.Empty{}) + require.NoError(t, err) + ch = make(chan interface{}) + err = s.Subscribe(ctx, clientID, query.Empty{}, ch) + require.NoError(t, err) + + err = s.Publish(ctx, "Cable") + require.NoError(t, err) + assertReceive(t, "Cable", ch) +} + +func TestUnsubscribeAll(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + s.Start() + defer s.Stop() + + ctx := context.Background() + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) + err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1) + require.NoError(t, err) + err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2) + require.NoError(t, err) + + err = s.UnsubscribeAll(ctx, clientID) + require.NoError(t, err) + + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll") + assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") + + _, ok := <-ch1 + assert.False(t, ok) + _, ok = 
<-ch2 + assert.False(t, ok) +} + +func TestBufferCapacity(t *testing.T) { + s := pubsub.NewServer(pubsub.BufferCapacity(2)) + s.SetLogger(log.TestingLogger()) + + assert.Equal(t, 2, s.BufferCapacity()) + + ctx := context.Background() + err := s.Publish(ctx, "Nighthawk") + require.NoError(t, err) + err = s.Publish(ctx, "Sage") + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + err = s.Publish(ctx, "Ironclad") + if assert.Error(t, err) { + assert.Equal(t, context.DeadlineExceeded, err) + } +} + +func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } +func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } +func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } + +func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } +func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } +func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } + +func benchmarkNClients(n int, b *testing.B) { + s := pubsub.NewServer() + s.Start() + defer s.Stop() + + ctx := context.Background() + for i := 0; i < n; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)})) + } +} + +func benchmarkNClientsOneQuery(n int, b *testing.B) { + s := pubsub.NewServer() + s.Start() + defer s.Stop() + + ctx := context.Background() + q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") + for i := 0; i < n; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + s.Subscribe(ctx, clientID, q, ch) + } + + b.ReportAllocs() + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"})) + } +} + +/////////////////////////////////////////////////////////////////////////////// +/// HELPERS +/////////////////////////////////////////////////////////////////////////////// + +func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) { + select { + case actual := <-ch: + if actual != nil { + assert.Equal(t, expected, actual, msgAndArgs...) + } + case <-time.After(1 * time.Second): + t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected) + debug.PrintStack() + } +} diff --git a/chains/tendermint_34/libs/pubsub/query/Makefile b/chains/tendermint_34/libs/pubsub/query/Makefile new file mode 100755 index 0000000..aef42b2 --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/Makefile @@ -0,0 +1,11 @@ +gen_query_parser: + go get -u -v github.com/pointlander/peg + peg -inline -switch query.peg + +fuzzy_test: + go get -u -v github.com/dvyukov/go-fuzz/go-fuzz + go get -u -v github.com/dvyukov/go-fuzz/go-fuzz-build + go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test + go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output + +.PHONY: gen_query_parser fuzzy_test diff --git a/chains/tendermint_34/libs/pubsub/query/empty.go b/chains/tendermint_34/libs/pubsub/query/empty.go new file mode 100755 index 0000000..17d7ace --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/empty.go @@ -0,0 +1,16 @@ +package query + +import "github.com/tendermint/tendermint/libs/pubsub" + +// Empty query matches any set of tags. +type Empty struct { +} + +// Matches always returns true. 
+func (Empty) Matches(tags pubsub.TagMap) bool { + return true +} + +func (Empty) String() string { + return "empty" +} diff --git a/chains/tendermint_34/libs/pubsub/query/empty_test.go b/chains/tendermint_34/libs/pubsub/query/empty_test.go new file mode 100755 index 0000000..6183b6b --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/empty_test.go @@ -0,0 +1,18 @@ +package query_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestEmptyQueryMatchesAnything(t *testing.T) { + q := query.Empty{} + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Asher": "Roth"}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66"}))) + assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66", "Billy": "Blue"}))) +} diff --git a/chains/tendermint_34/libs/pubsub/query/fuzz_test/main.go b/chains/tendermint_34/libs/pubsub/query/fuzz_test/main.go new file mode 100755 index 0000000..7a46116 --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/fuzz_test/main.go @@ -0,0 +1,30 @@ +package fuzz_test + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func Fuzz(data []byte) int { + sdata := string(data) + q0, err := query.New(sdata) + if err != nil { + return 0 + } + + sdata1 := q0.String() + q1, err := query.New(sdata1) + if err != nil { + panic(err) + } + + sdata2 := q1.String() + if sdata1 != sdata2 { + fmt.Printf("q0: %q\n", sdata1) + fmt.Printf("q1: %q\n", sdata2) + panic("query changed") + } + + return 1 +} diff --git a/chains/tendermint_34/libs/pubsub/query/parser_test.go b/chains/tendermint_34/libs/pubsub/query/parser_test.go new file mode 100755 index 0000000..c065eab --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/parser_test.go @@ -0,0 +1,91 @@ +package query_test + +import 
( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestParser(t *testing.T) { + cases := []struct { + query string + valid bool + }{ + {"tm.events.type='NewBlock'", true}, + {"tm.events.type = 'NewBlock'", true}, + {"tm.events.name = ''", true}, + {"tm.events.type='TIME'", true}, + {"tm.events.type='DATE'", true}, + {"tm.events.type='='", true}, + {"tm.events.type='TIME", false}, + {"tm.events.type=TIME'", false}, + {"tm.events.type==", false}, + {"tm.events.type=NewBlock", false}, + {">==", false}, + {"tm.events.type 'NewBlock' =", false}, + {"tm.events.type>'NewBlock'", false}, + {"", false}, + {"=", false}, + {"='NewBlock'", false}, + {"tm.events.type=", false}, + + {"tm.events.typeNewBlock", false}, + {"tm.events.type'NewBlock'", false}, + {"'NewBlock'", false}, + {"NewBlock", false}, + {"", false}, + + {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, + {"tm.events.type='NewBlock' AND", false}, + {"tm.events.type='NewBlock' AN", false}, + {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, + {"AND tm.events.type='NewBlock' ", false}, + + {"abci.account.name CONTAINS 'Igor'", true}, + + {"tx.date > DATE 2013-05-03", true}, + {"tx.date < DATE 2013-05-03", true}, + {"tx.date <= DATE 2013-05-03", true}, + {"tx.date >= DATE 2013-05-03", true}, + {"tx.date >= DAT 2013-05-03", false}, + {"tx.date <= DATE2013-05-03", false}, + {"tx.date <= DATE -05-03", false}, + {"tx.date >= DATE 20130503", false}, + {"tx.date >= DATE 2013+01-03", false}, + // incorrect year, month, day + {"tx.date >= DATE 0013-01-03", false}, + {"tx.date >= DATE 2013-31-03", false}, + {"tx.date >= DATE 2013-01-83", false}, + + {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, + {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, + {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME2013-05-03T14:45:00Z", false}, + {"tx.date = 
IME 2013-05-03T14:45:00Z", false}, + {"tx.date = TIME 2013-05-:45:00Z", false}, + {"tx.date >= TIME 2013-05-03T14:45:00", false}, + {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, + {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, + + {"account.balance=100", true}, + {"account.balance >= 200", true}, + {"account.balance >= -300", false}, + {"account.balance >>= 400", false}, + {"account.balance=33.22.1", false}, + + {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, + {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, + } + + for _, c := range cases { + _, err := query.New(c.query) + if c.valid { + assert.NoErrorf(t, err, "Query was '%s'", c.query) + } else { + assert.Errorf(t, err, "Query was '%s'", c.query) + } + } +} diff --git a/chains/tendermint_34/libs/pubsub/query/query.go b/chains/tendermint_34/libs/pubsub/query/query.go new file mode 100755 index 0000000..ec18748 --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/query.go @@ -0,0 +1,339 @@ +// Package query provides a parser for a custom query format: +// +// abci.invoice.number=22 AND abci.invoice.owner=Ivan +// +// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. +// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics +// +// It has a support for numbers (integer and floating point), dates and times. +package query + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/tendermint/tendermint/libs/pubsub" +) + +// Query holds the query string and the query parser. +type Query struct { + str string + parser *QueryParser +} + +// Condition represents a single condition within a query and consists of tag +// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7"). +type Condition struct { + Tag string + Op Operator + Operand interface{} +} + +// New parses the given string and returns a query or error if the string is +// invalid. 
+func New(s string) (*Query, error) { + p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)} + p.Init() + if err := p.Parse(); err != nil { + return nil, err + } + return &Query{str: s, parser: p}, nil +} + +// MustParse turns the given string into a query or panics; for tests or others +// cases where you know the string is valid. +func MustParse(s string) *Query { + q, err := New(s) + if err != nil { + panic(fmt.Sprintf("failed to parse %s: %v", s, err)) + } + return q +} + +// String returns the original string. +func (q *Query) String() string { + return q.str +} + +// Operator is an operator that defines some kind of relation between tag and +// operand (equality, etc.). +type Operator uint8 + +const ( + // "<=" + OpLessEqual Operator = iota + // ">=" + OpGreaterEqual + // "<" + OpLess + // ">" + OpGreater + // "=" + OpEqual + // "CONTAINS"; used to check if a string contains a certain sub string. + OpContains +) + +const ( + // DateLayout defines a layout for all dates (`DATE date`) + DateLayout = "2006-01-02" + // TimeLayout defines a layout for all times (`TIME time`) + TimeLayout = time.RFC3339 +) + +// Conditions returns a list of conditions. +func (q *Query) Conditions() []Condition { + conditions := make([]Condition, 0) + + buffer, begin, end := q.parser.Buffer, 0, 0 + + var tag string + var op Operator + + // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + for _, token := range q.parser.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + case ruletag: + tag = buffer[begin:end] + case rulele: + op = OpLessEqual + case rulege: + op = OpGreaterEqual + case rulel: + op = OpLess + case ruleg: + op = OpGreater + case ruleequal: + op = OpEqual + case rulecontains: + op = OpContains + case rulevalue: + // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") + valueWithoutSingleQuotes := buffer[begin+1 : end-1] + conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes}) + case rulenumber: + number := buffer[begin:end] + if strings.ContainsAny(number, ".") { // if it looks like a floating-point number + value, err := strconv.ParseFloat(number, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) + } + conditions = append(conditions, Condition{tag, op, value}) + } else { + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) + } + conditions = append(conditions, Condition{tag, op, value}) + } + case ruletime: + value, err := time.Parse(TimeLayout, buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + conditions = append(conditions, Condition{tag, op, value}) + case ruledate: + value, err := time.Parse("2006-01-02", buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + conditions = append(conditions, Condition{tag, op, value}) + } + } + + return conditions +} + +// Matches returns true if the query matches the given set of tags, false otherwise. +// +// For example, query "name=John" matches tags = {"name": "John"}. More +// examples could be found in parser_test.go and query_test.go. 
+func (q *Query) Matches(tags pubsub.TagMap) bool { + if tags.Len() == 0 { + return false + } + + buffer, begin, end := q.parser.Buffer, 0, 0 + + var tag string + var op Operator + + // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + for _, token := range q.parser.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + case ruletag: + tag = buffer[begin:end] + case rulele: + op = OpLessEqual + case rulege: + op = OpGreaterEqual + case rulel: + op = OpLess + case ruleg: + op = OpGreater + case ruleequal: + op = OpEqual + case rulecontains: + op = OpContains + case rulevalue: + // strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock") + valueWithoutSingleQuotes := buffer[begin+1 : end-1] + + // see if the triplet (tag, operator, operand) matches any tag + // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } + if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { + return false + } + case rulenumber: + number := buffer[begin:end] + if strings.ContainsAny(number, ".") { // if it looks like a floating-point number + value, err := strconv.ParseFloat(number, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } else { + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } + case ruletime: + value, err := time.Parse(TimeLayout, buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + if !match(tag, op, 
reflect.ValueOf(value), tags) { + return false + } + case ruledate: + value, err := time.Parse("2006-01-02", buffer[begin:end]) + if err != nil { + panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) + } + if !match(tag, op, reflect.ValueOf(value), tags) { + return false + } + } + } + + return true +} + +// match returns true if the given triplet (tag, operator, operand) matches any tag. +// +// First, it looks up the tag in tags and if it finds one, tries to compare the +// value from it to the operand using the operator. +// +// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } +func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool { + // look up the tag from the query in tags + value, ok := tags.Get(tag) + if !ok { + return false + } + switch operand.Kind() { + case reflect.Struct: // time + operandAsTime := operand.Interface().(time.Time) + // try our best to convert value from tags to time.Time + var ( + v time.Time + err error + ) + if strings.ContainsAny(value, "T") { + v, err = time.Parse(TimeLayout, value) + } else { + v, err = time.Parse(DateLayout, value) + } + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err)) + } + switch op { + case OpLessEqual: + return v.Before(operandAsTime) || v.Equal(operandAsTime) + case OpGreaterEqual: + return v.Equal(operandAsTime) || v.After(operandAsTime) + case OpLess: + return v.Before(operandAsTime) + case OpGreater: + return v.After(operandAsTime) + case OpEqual: + return v.Equal(operandAsTime) + } + case reflect.Float64: + operandFloat64 := operand.Interface().(float64) + var v float64 + // try our best to convert value from tags to float64 + v, err := strconv.ParseFloat(value, 64) + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) + } + switch op { + case OpLessEqual: + 
return v <= operandFloat64 + case OpGreaterEqual: + return v >= operandFloat64 + case OpLess: + return v < operandFloat64 + case OpGreater: + return v > operandFloat64 + case OpEqual: + return v == operandFloat64 + } + case reflect.Int64: + operandInt := operand.Interface().(int64) + var v int64 + // if value looks like float, we try to parse it as float + if strings.ContainsAny(value, ".") { + v1, err := strconv.ParseFloat(value, 64) + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) + } + v = int64(v1) + } else { + var err error + // try our best to convert value from tags to int64 + v, err = strconv.ParseInt(value, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err)) + } + } + switch op { + case OpLessEqual: + return v <= operandInt + case OpGreaterEqual: + return v >= operandInt + case OpLess: + return v < operandInt + case OpGreater: + return v > operandInt + case OpEqual: + return v == operandInt + } + case reflect.String: + switch op { + case OpEqual: + return value == operand.String() + case OpContains: + return strings.Contains(value, operand.String()) + } + default: + panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind())) + } + + return false +} diff --git a/chains/tendermint_34/libs/pubsub/query/query.peg b/chains/tendermint_34/libs/pubsub/query/query.peg new file mode 100755 index 0000000..739892e --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/query.peg @@ -0,0 +1,33 @@ +package query + +type QueryParser Peg { +} + +e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !. + +condition <- tag ' '* (le ' '* (number / time / date) + / ge ' '* (number / time / date) + / l ' '* (number / time / date) + / g ' '* (number / time / date) + / equal ' '* (number / time / date / value) + / contains ' '* value + ) + +tag <- < (![ \t\n\r\\()"'=><] .)+ > +value <- < '\'' (!["'] .)* '\''> +number <- < ('0' + / [1-9] digit* ('.' digit*)?) 
> +digit <- [0-9] +time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') > +date <- "DATE " < year '-' month '-' day > +year <- ('1' / '2') digit digit digit +month <- ('0' / '1') digit +day <- ('0' / '1' / '2' / '3') digit +and <- "AND" + +equal <- "=" +contains <- "CONTAINS" +le <- "<=" +ge <- ">=" +l <- "<" +g <- ">" diff --git a/chains/tendermint_34/libs/pubsub/query/query.peg.go b/chains/tendermint_34/libs/pubsub/query/query.peg.go new file mode 100755 index 0000000..c1cc60a --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/query.peg.go @@ -0,0 +1,1555 @@ +// nolint +package query + +//go:generate peg -inline -switch query.peg + +import ( + "fmt" + "math" + "sort" + "strconv" +) + +const endSymbol rune = 1114112 + +/* The rule types inferred from the grammar are below. */ +type pegRule uint8 + +const ( + ruleUnknown pegRule = iota + rulee + rulecondition + ruletag + rulevalue + rulenumber + ruledigit + ruletime + ruledate + ruleyear + rulemonth + ruleday + ruleand + ruleequal + rulecontains + rulele + rulege + rulel + ruleg + rulePegText +) + +var rul3s = [...]string{ + "Unknown", + "e", + "condition", + "tag", + "value", + "number", + "digit", + "time", + "date", + "year", + "month", + "day", + "and", + "equal", + "contains", + "le", + "ge", + "l", + "g", + "PegText", +} + +type token32 struct { + pegRule + begin, end uint32 +} + +func (t *token32) String() string { + return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) +} + +type node32 struct { + token32 + up, next *node32 +} + +func (node *node32) print(pretty bool, buffer string) { + var print func(node *node32, depth int) + print = func(node *node32, depth int) { + for node != nil { + for c := 0; c < depth; c++ { + fmt.Printf(" ") + } + rule := rul3s[node.pegRule] + quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) + if !pretty { + fmt.Printf("%v %v\n", rule, quote) + 
} else { + fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) + } + if node.up != nil { + print(node.up, depth+1) + } + node = node.next + } + } + print(node, 0) +} + +func (node *node32) Print(buffer string) { + node.print(false, buffer) +} + +func (node *node32) PrettyPrint(buffer string) { + node.print(true, buffer) +} + +type tokens32 struct { + tree []token32 +} + +func (t *tokens32) Trim(length uint32) { + t.tree = t.tree[:length] +} + +func (t *tokens32) Print() { + for _, token := range t.tree { + fmt.Println(token.String()) + } +} + +func (t *tokens32) AST() *node32 { + type element struct { + node *node32 + down *element + } + tokens := t.Tokens() + var stack *element + for _, token := range tokens { + if token.begin == token.end { + continue + } + node := &node32{token32: token} + for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { + stack.node.next = node.up + node.up = stack.node + stack = stack.down + } + stack = &element{node: node, down: stack} + } + if stack != nil { + return stack.node + } + return nil +} + +func (t *tokens32) PrintSyntaxTree(buffer string) { + t.AST().Print(buffer) +} + +func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { + t.AST().PrettyPrint(buffer) +} + +func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { + if tree := t.tree; int(index) >= len(tree) { + expanded := make([]token32, 2*len(tree)) + copy(expanded, tree) + t.tree = expanded + } + t.tree[index] = token32{ + pegRule: rule, + begin: begin, + end: end, + } +} + +func (t *tokens32) Tokens() []token32 { + return t.tree +} + +type QueryParser struct { + Buffer string + buffer []rune + rules [20]func() bool + parse func(rule ...int) error + reset func() + Pretty bool + tokens32 +} + +func (p *QueryParser) Parse(rule ...int) error { + return p.parse(rule...) 
+} + +func (p *QueryParser) Reset() { + p.reset() +} + +type textPosition struct { + line, symbol int +} + +type textPositionMap map[int]textPosition + +func translatePositions(buffer []rune, positions []int) textPositionMap { + length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 + sort.Ints(positions) + +search: + for i, c := range buffer { + if c == '\n' { + line, symbol = line+1, 0 + } else { + symbol++ + } + if i == positions[j] { + translations[positions[j]] = textPosition{line, symbol} + for j++; j < length; j++ { + if i != positions[j] { + continue search + } + } + break search + } + } + + return translations +} + +type parseError struct { + p *QueryParser + max token32 +} + +func (e *parseError) Error() string { + tokens, error := []token32{e.max}, "\n" + positions, p := make([]int, 2*len(tokens)), 0 + for _, token := range tokens { + positions[p], p = int(token.begin), p+1 + positions[p], p = int(token.end), p+1 + } + translations := translatePositions(e.p.buffer, positions) + format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" + if e.p.Pretty { + format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" + } + for _, token := range tokens { + begin, end := int(token.begin), int(token.end) + error += fmt.Sprintf(format, + rul3s[token.pegRule], + translations[begin].line, translations[begin].symbol, + translations[end].line, translations[end].symbol, + strconv.Quote(string(e.p.buffer[begin:end]))) + } + + return error +} + +func (p *QueryParser) PrintSyntaxTree() { + if p.Pretty { + p.tokens32.PrettyPrintSyntaxTree(p.Buffer) + } else { + p.tokens32.PrintSyntaxTree(p.Buffer) + } +} + +func (p *QueryParser) Init() { + var ( + max token32 + position, tokenIndex uint32 + buffer []rune + ) + p.reset = func() { + max = token32{} + position, tokenIndex = 0, 0 + + p.buffer = []rune(p.Buffer) + if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != 
endSymbol { + p.buffer = append(p.buffer, endSymbol) + } + buffer = p.buffer + } + p.reset() + + _rules := p.rules + tree := tokens32{tree: make([]token32, math.MaxInt16)} + p.parse = func(rule ...int) error { + r := 1 + if len(rule) > 0 { + r = rule[0] + } + matches := p.rules[r]() + p.tokens32 = tree + if matches { + p.Trim(tokenIndex) + return nil + } + return &parseError{p, max} + } + + add := func(rule pegRule, begin uint32) { + tree.Add(rule, begin, position, tokenIndex) + tokenIndex++ + if begin != position && position > max.end { + max = token32{rule, begin, position} + } + } + + matchDot := func() bool { + if buffer[position] != endSymbol { + position++ + return true + } + return false + } + + /*matchChar := func(c byte) bool { + if buffer[position] == c { + position++ + return true + } + return false + }*/ + + /*matchRange := func(lower byte, upper byte) bool { + if c := buffer[position]; c >= lower && c <= upper { + position++ + return true + } + return false + }*/ + + _rules = [...]func() bool{ + nil, + /* 0 e <- <('"' condition (' '+ and ' '+ condition)* '"' !.)> */ + func() bool { + position0, tokenIndex0 := position, tokenIndex + { + position1 := position + if buffer[position] != rune('"') { + goto l0 + } + position++ + if !_rules[rulecondition]() { + goto l0 + } + l2: + { + position3, tokenIndex3 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l3 + } + position++ + l4: + { + position5, tokenIndex5 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l5 + } + position++ + goto l4 + l5: + position, tokenIndex = position5, tokenIndex5 + } + { + position6 := position + { + position7, tokenIndex7 := position, tokenIndex + if buffer[position] != rune('a') { + goto l8 + } + position++ + goto l7 + l8: + position, tokenIndex = position7, tokenIndex7 + if buffer[position] != rune('A') { + goto l3 + } + position++ + } + l7: + { + position9, tokenIndex9 := position, tokenIndex + if buffer[position] != rune('n') { + goto l10 + 
} + position++ + goto l9 + l10: + position, tokenIndex = position9, tokenIndex9 + if buffer[position] != rune('N') { + goto l3 + } + position++ + } + l9: + { + position11, tokenIndex11 := position, tokenIndex + if buffer[position] != rune('d') { + goto l12 + } + position++ + goto l11 + l12: + position, tokenIndex = position11, tokenIndex11 + if buffer[position] != rune('D') { + goto l3 + } + position++ + } + l11: + add(ruleand, position6) + } + if buffer[position] != rune(' ') { + goto l3 + } + position++ + l13: + { + position14, tokenIndex14 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l14 + } + position++ + goto l13 + l14: + position, tokenIndex = position14, tokenIndex14 + } + if !_rules[rulecondition]() { + goto l3 + } + goto l2 + l3: + position, tokenIndex = position3, tokenIndex3 + } + if buffer[position] != rune('"') { + goto l0 + } + position++ + { + position15, tokenIndex15 := position, tokenIndex + if !matchDot() { + goto l15 + } + goto l0 + l15: + position, tokenIndex = position15, tokenIndex15 + } + add(rulee, position1) + } + return true + l0: + position, tokenIndex = position0, tokenIndex0 + return false + }, + /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* ((&('\'') value) | (&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ + func() bool { + position16, tokenIndex16 := position, tokenIndex + { + 
position17 := position + { + position18 := position + { + position19 := position + { + position22, tokenIndex22 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l22 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l22 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l22 + } + position++ + break + case '\'': + if buffer[position] != rune('\'') { + goto l22 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l22 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l22 + } + position++ + break + case '(': + if buffer[position] != rune('(') { + goto l22 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l22 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l22 + } + position++ + break + case '\n': + if buffer[position] != rune('\n') { + goto l22 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l22 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l22 + } + position++ + break + } + } + + goto l16 + l22: + position, tokenIndex = position22, tokenIndex22 + } + if !matchDot() { + goto l16 + } + l20: + { + position21, tokenIndex21 := position, tokenIndex + { + position24, tokenIndex24 := position, tokenIndex + { + switch buffer[position] { + case '<': + if buffer[position] != rune('<') { + goto l24 + } + position++ + break + case '>': + if buffer[position] != rune('>') { + goto l24 + } + position++ + break + case '=': + if buffer[position] != rune('=') { + goto l24 + } + position++ + break + case '\'': + if buffer[position] != rune('\'') { + goto l24 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l24 + } + position++ + break + case ')': + if buffer[position] != rune(')') { + goto l24 + } + position++ + break + case '(': + if 
buffer[position] != rune('(') { + goto l24 + } + position++ + break + case '\\': + if buffer[position] != rune('\\') { + goto l24 + } + position++ + break + case '\r': + if buffer[position] != rune('\r') { + goto l24 + } + position++ + break + case '\n': + if buffer[position] != rune('\n') { + goto l24 + } + position++ + break + case '\t': + if buffer[position] != rune('\t') { + goto l24 + } + position++ + break + default: + if buffer[position] != rune(' ') { + goto l24 + } + position++ + break + } + } + + goto l21 + l24: + position, tokenIndex = position24, tokenIndex24 + } + if !matchDot() { + goto l21 + } + goto l20 + l21: + position, tokenIndex = position21, tokenIndex21 + } + add(rulePegText, position19) + } + add(ruletag, position18) + } + l26: + { + position27, tokenIndex27 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l27 + } + position++ + goto l26 + l27: + position, tokenIndex = position27, tokenIndex27 + } + { + position28, tokenIndex28 := position, tokenIndex + { + position30 := position + if buffer[position] != rune('<') { + goto l29 + } + position++ + if buffer[position] != rune('=') { + goto l29 + } + position++ + add(rulele, position30) + } + l31: + { + position32, tokenIndex32 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l32 + } + position++ + goto l31 + l32: + position, tokenIndex = position32, tokenIndex32 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l29 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l29 + } + break + default: + if !_rules[rulenumber]() { + goto l29 + } + break + } + } + + goto l28 + l29: + position, tokenIndex = position28, tokenIndex28 + { + position35 := position + if buffer[position] != rune('>') { + goto l34 + } + position++ + if buffer[position] != rune('=') { + goto l34 + } + position++ + add(rulege, position35) + } + l36: + { + position37, tokenIndex37 := position, tokenIndex + if buffer[position] != rune(' ') { + goto 
l37 + } + position++ + goto l36 + l37: + position, tokenIndex = position37, tokenIndex37 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l34 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l34 + } + break + default: + if !_rules[rulenumber]() { + goto l34 + } + break + } + } + + goto l28 + l34: + position, tokenIndex = position28, tokenIndex28 + { + switch buffer[position] { + case '=': + { + position40 := position + if buffer[position] != rune('=') { + goto l16 + } + position++ + add(ruleequal, position40) + } + l41: + { + position42, tokenIndex42 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l42 + } + position++ + goto l41 + l42: + position, tokenIndex = position42, tokenIndex42 + } + { + switch buffer[position] { + case '\'': + if !_rules[rulevalue]() { + goto l16 + } + break + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + case '>': + { + position44 := position + if buffer[position] != rune('>') { + goto l16 + } + position++ + add(ruleg, position44) + } + l45: + { + position46, tokenIndex46 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l46 + } + position++ + goto l45 + l46: + position, tokenIndex = position46, tokenIndex46 + } + { + switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + case '<': + { + position48 := position + if buffer[position] != rune('<') { + goto l16 + } + position++ + add(rulel, position48) + } + l49: + { + position50, tokenIndex50 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l50 + } + position++ + goto l49 + l50: + position, tokenIndex = position50, tokenIndex50 + } + { 
+ switch buffer[position] { + case 'D', 'd': + if !_rules[ruledate]() { + goto l16 + } + break + case 'T', 't': + if !_rules[ruletime]() { + goto l16 + } + break + default: + if !_rules[rulenumber]() { + goto l16 + } + break + } + } + + break + default: + { + position52 := position + { + position53, tokenIndex53 := position, tokenIndex + if buffer[position] != rune('c') { + goto l54 + } + position++ + goto l53 + l54: + position, tokenIndex = position53, tokenIndex53 + if buffer[position] != rune('C') { + goto l16 + } + position++ + } + l53: + { + position55, tokenIndex55 := position, tokenIndex + if buffer[position] != rune('o') { + goto l56 + } + position++ + goto l55 + l56: + position, tokenIndex = position55, tokenIndex55 + if buffer[position] != rune('O') { + goto l16 + } + position++ + } + l55: + { + position57, tokenIndex57 := position, tokenIndex + if buffer[position] != rune('n') { + goto l58 + } + position++ + goto l57 + l58: + position, tokenIndex = position57, tokenIndex57 + if buffer[position] != rune('N') { + goto l16 + } + position++ + } + l57: + { + position59, tokenIndex59 := position, tokenIndex + if buffer[position] != rune('t') { + goto l60 + } + position++ + goto l59 + l60: + position, tokenIndex = position59, tokenIndex59 + if buffer[position] != rune('T') { + goto l16 + } + position++ + } + l59: + { + position61, tokenIndex61 := position, tokenIndex + if buffer[position] != rune('a') { + goto l62 + } + position++ + goto l61 + l62: + position, tokenIndex = position61, tokenIndex61 + if buffer[position] != rune('A') { + goto l16 + } + position++ + } + l61: + { + position63, tokenIndex63 := position, tokenIndex + if buffer[position] != rune('i') { + goto l64 + } + position++ + goto l63 + l64: + position, tokenIndex = position63, tokenIndex63 + if buffer[position] != rune('I') { + goto l16 + } + position++ + } + l63: + { + position65, tokenIndex65 := position, tokenIndex + if buffer[position] != rune('n') { + goto l66 + } + position++ + goto l65 + 
l66: + position, tokenIndex = position65, tokenIndex65 + if buffer[position] != rune('N') { + goto l16 + } + position++ + } + l65: + { + position67, tokenIndex67 := position, tokenIndex + if buffer[position] != rune('s') { + goto l68 + } + position++ + goto l67 + l68: + position, tokenIndex = position67, tokenIndex67 + if buffer[position] != rune('S') { + goto l16 + } + position++ + } + l67: + add(rulecontains, position52) + } + l69: + { + position70, tokenIndex70 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l70 + } + position++ + goto l69 + l70: + position, tokenIndex = position70, tokenIndex70 + } + if !_rules[rulevalue]() { + goto l16 + } + break + } + } + + } + l28: + add(rulecondition, position17) + } + return true + l16: + position, tokenIndex = position16, tokenIndex16 + return false + }, + /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('\'') '\'') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ + nil, + /* 3 value <- <<('\'' (!('"' / '\'') .)* '\'')>> */ + func() bool { + position72, tokenIndex72 := position, tokenIndex + { + position73 := position + { + position74 := position + if buffer[position] != rune('\'') { + goto l72 + } + position++ + l75: + { + position76, tokenIndex76 := position, tokenIndex + { + position77, tokenIndex77 := position, tokenIndex + { + position78, tokenIndex78 := position, tokenIndex + if buffer[position] != rune('"') { + goto l79 + } + position++ + goto l78 + l79: + position, tokenIndex = position78, tokenIndex78 + if buffer[position] != rune('\'') { + goto l77 + } + position++ + } + l78: + goto l76 + l77: + position, tokenIndex = position77, tokenIndex77 + } + if !matchDot() { + goto l76 + } + goto l75 + l76: + position, tokenIndex = position76, tokenIndex76 + } + if buffer[position] != rune('\'') { + goto l72 + } + position++ + add(rulePegText, position74) + } + add(rulevalue, position73) + } + 
return true + l72: + position, tokenIndex = position72, tokenIndex72 + return false + }, + /* 4 number <- <<('0' / ([1-9] digit* ('.' digit*)?))>> */ + func() bool { + position80, tokenIndex80 := position, tokenIndex + { + position81 := position + { + position82 := position + { + position83, tokenIndex83 := position, tokenIndex + if buffer[position] != rune('0') { + goto l84 + } + position++ + goto l83 + l84: + position, tokenIndex = position83, tokenIndex83 + if c := buffer[position]; c < rune('1') || c > rune('9') { + goto l80 + } + position++ + l85: + { + position86, tokenIndex86 := position, tokenIndex + if !_rules[ruledigit]() { + goto l86 + } + goto l85 + l86: + position, tokenIndex = position86, tokenIndex86 + } + { + position87, tokenIndex87 := position, tokenIndex + if buffer[position] != rune('.') { + goto l87 + } + position++ + l89: + { + position90, tokenIndex90 := position, tokenIndex + if !_rules[ruledigit]() { + goto l90 + } + goto l89 + l90: + position, tokenIndex = position90, tokenIndex90 + } + goto l88 + l87: + position, tokenIndex = position87, tokenIndex87 + } + l88: + } + l83: + add(rulePegText, position82) + } + add(rulenumber, position81) + } + return true + l80: + position, tokenIndex = position80, tokenIndex80 + return false + }, + /* 5 digit <- <[0-9]> */ + func() bool { + position91, tokenIndex91 := position, tokenIndex + { + position92 := position + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l91 + } + position++ + add(ruledigit, position92) + } + return true + l91: + position, tokenIndex = position91, tokenIndex91 + return false + }, + /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ + func() bool { + position93, tokenIndex93 := position, tokenIndex + { + position94 := position + { + position95, tokenIndex95 := position, tokenIndex + if buffer[position] != rune('t') { 
+ goto l96 + } + position++ + goto l95 + l96: + position, tokenIndex = position95, tokenIndex95 + if buffer[position] != rune('T') { + goto l93 + } + position++ + } + l95: + { + position97, tokenIndex97 := position, tokenIndex + if buffer[position] != rune('i') { + goto l98 + } + position++ + goto l97 + l98: + position, tokenIndex = position97, tokenIndex97 + if buffer[position] != rune('I') { + goto l93 + } + position++ + } + l97: + { + position99, tokenIndex99 := position, tokenIndex + if buffer[position] != rune('m') { + goto l100 + } + position++ + goto l99 + l100: + position, tokenIndex = position99, tokenIndex99 + if buffer[position] != rune('M') { + goto l93 + } + position++ + } + l99: + { + position101, tokenIndex101 := position, tokenIndex + if buffer[position] != rune('e') { + goto l102 + } + position++ + goto l101 + l102: + position, tokenIndex = position101, tokenIndex101 + if buffer[position] != rune('E') { + goto l93 + } + position++ + } + l101: + if buffer[position] != rune(' ') { + goto l93 + } + position++ + { + position103 := position + if !_rules[ruleyear]() { + goto l93 + } + if buffer[position] != rune('-') { + goto l93 + } + position++ + if !_rules[rulemonth]() { + goto l93 + } + if buffer[position] != rune('-') { + goto l93 + } + position++ + if !_rules[ruleday]() { + goto l93 + } + if buffer[position] != rune('T') { + goto l93 + } + position++ + if !_rules[ruledigit]() { + goto l93 + } + if !_rules[ruledigit]() { + goto l93 + } + if buffer[position] != rune(':') { + goto l93 + } + position++ + if !_rules[ruledigit]() { + goto l93 + } + if !_rules[ruledigit]() { + goto l93 + } + if buffer[position] != rune(':') { + goto l93 + } + position++ + if !_rules[ruledigit]() { + goto l93 + } + if !_rules[ruledigit]() { + goto l93 + } + { + position104, tokenIndex104 := position, tokenIndex + { + position106, tokenIndex106 := position, tokenIndex + if buffer[position] != rune('-') { + goto l107 + } + position++ + goto l106 + l107: + position, 
tokenIndex = position106, tokenIndex106 + if buffer[position] != rune('+') { + goto l105 + } + position++ + } + l106: + if !_rules[ruledigit]() { + goto l105 + } + if !_rules[ruledigit]() { + goto l105 + } + if buffer[position] != rune(':') { + goto l105 + } + position++ + if !_rules[ruledigit]() { + goto l105 + } + if !_rules[ruledigit]() { + goto l105 + } + goto l104 + l105: + position, tokenIndex = position104, tokenIndex104 + if buffer[position] != rune('Z') { + goto l93 + } + position++ + } + l104: + add(rulePegText, position103) + } + add(ruletime, position94) + } + return true + l93: + position, tokenIndex = position93, tokenIndex93 + return false + }, + /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ + func() bool { + position108, tokenIndex108 := position, tokenIndex + { + position109 := position + { + position110, tokenIndex110 := position, tokenIndex + if buffer[position] != rune('d') { + goto l111 + } + position++ + goto l110 + l111: + position, tokenIndex = position110, tokenIndex110 + if buffer[position] != rune('D') { + goto l108 + } + position++ + } + l110: + { + position112, tokenIndex112 := position, tokenIndex + if buffer[position] != rune('a') { + goto l113 + } + position++ + goto l112 + l113: + position, tokenIndex = position112, tokenIndex112 + if buffer[position] != rune('A') { + goto l108 + } + position++ + } + l112: + { + position114, tokenIndex114 := position, tokenIndex + if buffer[position] != rune('t') { + goto l115 + } + position++ + goto l114 + l115: + position, tokenIndex = position114, tokenIndex114 + if buffer[position] != rune('T') { + goto l108 + } + position++ + } + l114: + { + position116, tokenIndex116 := position, tokenIndex + if buffer[position] != rune('e') { + goto l117 + } + position++ + goto l116 + l117: + position, tokenIndex = position116, tokenIndex116 + if buffer[position] != rune('E') { + goto l108 + } + position++ + } + l116: + if buffer[position] != rune(' ') { + 
goto l108 + } + position++ + { + position118 := position + if !_rules[ruleyear]() { + goto l108 + } + if buffer[position] != rune('-') { + goto l108 + } + position++ + if !_rules[rulemonth]() { + goto l108 + } + if buffer[position] != rune('-') { + goto l108 + } + position++ + if !_rules[ruleday]() { + goto l108 + } + add(rulePegText, position118) + } + add(ruledate, position109) + } + return true + l108: + position, tokenIndex = position108, tokenIndex108 + return false + }, + /* 8 year <- <(('1' / '2') digit digit digit)> */ + func() bool { + position119, tokenIndex119 := position, tokenIndex + { + position120 := position + { + position121, tokenIndex121 := position, tokenIndex + if buffer[position] != rune('1') { + goto l122 + } + position++ + goto l121 + l122: + position, tokenIndex = position121, tokenIndex121 + if buffer[position] != rune('2') { + goto l119 + } + position++ + } + l121: + if !_rules[ruledigit]() { + goto l119 + } + if !_rules[ruledigit]() { + goto l119 + } + if !_rules[ruledigit]() { + goto l119 + } + add(ruleyear, position120) + } + return true + l119: + position, tokenIndex = position119, tokenIndex119 + return false + }, + /* 9 month <- <(('0' / '1') digit)> */ + func() bool { + position123, tokenIndex123 := position, tokenIndex + { + position124 := position + { + position125, tokenIndex125 := position, tokenIndex + if buffer[position] != rune('0') { + goto l126 + } + position++ + goto l125 + l126: + position, tokenIndex = position125, tokenIndex125 + if buffer[position] != rune('1') { + goto l123 + } + position++ + } + l125: + if !_rules[ruledigit]() { + goto l123 + } + add(rulemonth, position124) + } + return true + l123: + position, tokenIndex = position123, tokenIndex123 + return false + }, + /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ + func() bool { + position127, tokenIndex127 := position, tokenIndex + { + position128 := position + { + switch buffer[position] { + case '3': + if 
buffer[position] != rune('3') { + goto l127 + } + position++ + break + case '2': + if buffer[position] != rune('2') { + goto l127 + } + position++ + break + case '1': + if buffer[position] != rune('1') { + goto l127 + } + position++ + break + default: + if buffer[position] != rune('0') { + goto l127 + } + position++ + break + } + } + + if !_rules[ruledigit]() { + goto l127 + } + add(ruleday, position128) + } + return true + l127: + position, tokenIndex = position127, tokenIndex127 + return false + }, + /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ + nil, + /* 12 equal <- <'='> */ + nil, + /* 13 contains <- <(('c' / 'C') ('o' / 'O') ('n' / 'N') ('t' / 'T') ('a' / 'A') ('i' / 'I') ('n' / 'N') ('s' / 'S'))> */ + nil, + /* 14 le <- <('<' '=')> */ + nil, + /* 15 ge <- <('>' '=')> */ + nil, + /* 16 l <- <'<'> */ + nil, + /* 17 g <- <'>'> */ + nil, + nil, + } + p.rules = _rules +} diff --git a/chains/tendermint_34/libs/pubsub/query/query_test.go b/chains/tendermint_34/libs/pubsub/query/query_test.go new file mode 100755 index 0000000..d1810f4 --- /dev/null +++ b/chains/tendermint_34/libs/pubsub/query/query_test.go @@ -0,0 +1,87 @@ +package query_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" +) + +func TestMatches(t *testing.T) { + var ( + txDate = "2017-01-01" + txTime = "2018-05-03T14:45:00Z" + ) + + testCases := []struct { + s string + tags map[string]string + err bool + matches bool + }{ + {"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true}, + + {"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true}, + {"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true}, + {"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true}, + {"account.balance < 1000.0", map[string]string{"account.balance": 
"900"}, false, true}, + {"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true}, + {"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true}, + {"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true}, + {"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false}, + + {"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true}, + {"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true}, + {"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false}, + + {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true}, + {"tx.time = TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": txTime}, false, false}, + + {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true}, + {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + if !tc.err { + require.Nil(t, err) + } + + if tc.matches { + assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags) + } else { + assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags) + } + } +} + +func TestMustParse(t *testing.T) { + assert.Panics(t, func() { query.MustParse("=") }) + assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) +} + +func TestConditions(t *testing.T) { + txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") + require.NoError(t, err) + + testCases := []struct { + s string + conditions []query.Condition + }{ + {s: "tm.events.type='NewBlock'", conditions: []query.Condition{{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}}, + {s: "tx.gas > 7 
AND tx.gas < 9", conditions: []query.Condition{{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, {Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}}, + {s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + require.Nil(t, err) + + assert.Equal(t, tc.conditions, q.Conditions()) + } +} diff --git a/chains/tendermint_34/libs/test.sh b/chains/tendermint_34/libs/test.sh new file mode 100755 index 0000000..ecf17fc --- /dev/null +++ b/chains/tendermint_34/libs/test.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -e + +# run the linter +# make metalinter_test + +# setup certs +make gen_certs + +# run the unit tests with coverage +echo "" > coverage.txt +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done + +# cleanup certs +make clean_certs diff --git a/chains/tendermint_34/libs/test/assert.go b/chains/tendermint_34/libs/test/assert.go new file mode 100755 index 0000000..a6ffed0 --- /dev/null +++ b/chains/tendermint_34/libs/test/assert.go @@ -0,0 +1,14 @@ +package test + +import ( + "testing" +) + +func AssertPanics(t *testing.T, msg string, f func()) { + defer func() { + if err := recover(); err == nil { + t.Errorf("Should have panic'd, but didn't: %v", msg) + } + }() + f() +} diff --git a/chains/tendermint_34/libs/test/mutate.go b/chains/tendermint_34/libs/test/mutate.go new file mode 100755 index 0000000..3bbbbd2 --- /dev/null +++ b/chains/tendermint_34/libs/test/mutate.go @@ -0,0 +1,28 @@ +package test + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +func MutateByteSlice(bytez []byte) []byte { + // If bytez is empty, panic + if len(bytez) == 0 { + panic("Cannot mutate an 
empty bytez")
+	}
+
+	// Copy bytez
+	mBytez := make([]byte, len(bytez))
+	copy(mBytez, bytez)
+	bytez = mBytez
+
+	// Try a random mutation
+	switch cmn.RandInt() % 2 {
+	case 0: // Mutate a single byte
+		bytez[cmn.RandInt()%len(bytez)] += byte(cmn.RandInt()%255 + 1)
+	case 1: // Remove an arbitrary byte
+		pos := cmn.RandInt() % len(bytez)
+		bytez = append(bytez[:pos], bytez[pos+1:]...)
+	}
+	return bytez
+}
diff --git a/chains/tendermint_34/libs/version/version.go b/chains/tendermint_34/libs/version/version.go
new file mode 100755
index 0000000..6e73a93
--- /dev/null
+++ b/chains/tendermint_34/libs/version/version.go
@@ -0,0 +1,3 @@
+package version
+
+const Version = "0.9.0"
diff --git a/chains/tendermint_34/structsMarlin.go b/chains/tendermint_34/structsMarlin.go
new file mode 100644
index 0000000..a41379e
--- /dev/null
+++ b/chains/tendermint_34/structsMarlin.go
@@ -0,0 +1,60 @@
+package tendermint_34
+
+import (
+	"net"
+	"sync"
+
+	lru "github.com/hashicorp/golang-lru"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	"github.com/supragya/tendermint_connector/chains/irisnet/conn" // TODO(review): this patch adds chains/tendermint_34/conns — confirm the irisnet path is intended
+	marlinTypes "github.com/supragya/tendermint_connector/types"
+)
+
+// TendermintHandler carries all per-session state for one connector <-> TM-core peering.
+type TendermintHandler struct {
+	servicedChainId      uint32
+	listenPort           int
+	isConnectionOutgoing bool
+	peerAddr             string
+	rpcAddr              string
+	privateKey           ed25519.PrivKeyEd25519
+	baseConnection       net.Conn
+	validatorCache       *lru.TwoQueueCache // presumably caches validator sets per height — confirm against users
+	maxValidHeight       int64
+	secretConnection     *conn.SecretConnection
+	marlinTo             chan marlinTypes.MarlinMessage
+	marlinFrom           chan marlinTypes.MarlinMessage
+	channelBuffer        map[byte][]marlinTypes.PacketMsg
+	peerNodeInfo         DefaultNodeInfo
+	p2pConnection        P2PConnection
+	throughput           throughPutData
+	signalConnError      chan struct{}
+	signalShutSend       chan struct{}
+	signalShutRecv       chan struct{}
+	signalShutThroughput chan struct{}
+	// no codec file needed: in protobuf the "codec" role is played by the
+	// generated types declared via github.com/gogo/protobuf/proto
+}
+
+// throughPutData collects message counters, guarded by mu.
+type throughPutData struct {
+	isDataConnect bool
+	toTMCore      map[string]uint32
+	fromTMCore    map[string]uint32
+	spam          map[string]uint32
+	mu            sync.Mutex
+}
+
+type keyData struct {
+	Chain            string
+	IdString         string
+	PrivateKeyString string
+	PublicKeyString  string
+	PrivateKey       [64]byte
+	PublicKey        [32]byte
+}
+
+type Validator struct {
+	PublicKey ed25519.PubKeyEd25519
+	Address   string
+}
\ No newline at end of file
diff --git a/chains/tendermint_34/structsTendermint.go b/chains/tendermint_34/structsTendermint.go
new file mode 100644
index 0000000..26acf72
--- /dev/null
+++ b/chains/tendermint_34/structsTendermint.go
@@ -0,0 +1,552 @@
+package tendermint_34
+
+import (
+	"net"
+	"bufio"
+	"sync"
+	"fmt"
+	"time"
+	"errors"
+
+	"github.com/gogo/protobuf/proto"
+	tmp2p "github.com/tendermint/tendermint/proto/p2p"
+	cmn "github.com/supragya/tendermint_connector/chains/irisnet/libs/common" // TODO(review): irisnet path inside tendermint_34 — confirm
+	flow "github.com/supragya/tendermint_connector/chains/irisnet/libs/flowrate" // TODO(review): irisnet path inside tendermint_34 — confirm
+	"github.com/tendermint/tendermint/crypto/merkle"
+)
+
+type ProtocolVersion struct {
+	P2P   uint64 `json:"p2p"`
+	Block uint64 `json:"block"`
+	App   uint64 `json:"app"`
+}
+
+type DefaultNodeInfo struct {
+	ProtocolVersion ProtocolVersion `json:"protocol_version"`
+
+	ID_        string `json:"id"`          // authenticated identifier
+	ListenAddr string `json:"listen_addr"` // accepting incoming
+
+	// Check compatibility.
+	// Channels are HexBytes so easier to read as JSON
+	Network  string       `json:"network"`  // network/chain ID
+	Version  string       `json:"version"`  // major.minor.revision
+	Channels cmn.HexBytes `json:"channels"` // channels this node knows about
+
+	// ASCIIText fields
+	Moniker string               `json:"moniker"` // arbitrary moniker
+	Other   DefaultNodeInfoOther `json:"other"`   // other application specific data
+}
+
+// DefaultNodeInfoOther is the misc.
application specific data
+type DefaultNodeInfoOther struct {
+	TxIndex    string `json:"tx_index"`
+	RPCAddress string `json:"rpc_address"`
+}
+
+type P2PConnection struct {
+	conn          net.Conn
+	bufConnReader *bufio.Reader
+	bufConnWriter *bufio.Writer
+	sendMonitor   *flow.Monitor
+	recvMonitor   *flow.Monitor
+	send          chan struct{}
+	pong          chan struct{}
+	// channels []*Channel
+	// channelsIdx map[byte]*Channel
+	errored uint32
+
+	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
+	// doneSendRoutine is closed when the sendRoutine actually quits.
+	quitSendRoutine chan struct{}
+	doneSendRoutine chan struct{}
+
+	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
+	quitRecvRoutine chan struct{}
+
+	// used to ensure FlushStop and OnStop
+	// are safe to call concurrently.
+	stopMtx sync.Mutex
+
+	flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
+	pingTimer  *time.Ticker       // send pings periodically
+
+	// close conn if pong is not received in pongTimeout
+	pongTimer     *time.Timer
+	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
+
+	chStatsTimer *time.Ticker // update channel stats periodically
+
+	created time.Time // time of creation
+
+	_maxPacketMsgSize int
+}
+
+//----------------------------------------
+// Packet
+
+type Packet interface {
+	AssertIsPacket()
+}
+
+func EncodePacket(pb proto.Message) []byte { // was proto.Packet — no such type in gogo/protobuf
+	pkt := tmp2p.Packet{} // was `pkt =` on an undeclared variable
+	switch pb := pb.(type) {
+	case *tmp2p.PacketPing:
+		pkt.Sum = &tmp2p.Packet_PacketPing{PacketPing: pb} // Sum takes the oneof wrapper, not the message itself
+	case *tmp2p.PacketPong:
+		pkt.Sum = &tmp2p.Packet_PacketPong{PacketPong: pb}
+	case *tmp2p.PacketMsg:
+		pkt.Sum = &tmp2p.Packet_PacketMsg{PacketMsg: pb}
+	} // switch was previously left unclosed
+	bz, err := proto.Marshal(&pkt)
+	if err != nil {
+		panic(fmt.Errorf("unable to marshal %T: %w", pb, err))
+	}
+	return bz
+}
+
+func DecodePacket(bz []byte) (proto.Message, error) { // was proto.Packet — no such type
+	pb := &tmp2p.Packet{}
+	err := proto.Unmarshal(bz, pb)
+	if err != nil {
+		return nil, err
+	}
+	switch pkt := pb.Sum.(type) { // Sum holds oneof wrappers — match wrapper types, as DecodeConsensusMessages does
+	case *tmp2p.Packet_PacketPing:
+		return pkt.PacketPing, nil
+	case *tmp2p.Packet_PacketPong:
+		return pkt.PacketPong, nil
+	case *tmp2p.Packet_PacketMsg:
+		return pkt.PacketMsg, nil
+	default:
+		return nil, fmt.Errorf("unknown Packet Type: %T", pkt)
+	}
+} // closing brace for the function was missing
+
+func (_ PacketPing) AssertIsPacket() {} // NOTE(review): local Packet types are distinct from tmp2p ones used by the codec above — confirm intent
+func (_ PacketPong) AssertIsPacket() {}
+func (_ PacketMsg) AssertIsPacket()  {}
+
+type PacketPing struct {
+}
+
+type PacketPong struct {
+}
+
+type PacketMsg struct {
+	ChannelID byte
+	EOF       byte // 1 means message ends here.
+	Bytes     []byte
+}
+
+func (mp PacketMsg) String() string {
+	return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
+}
+
+// Consensus Message
+type ConsensusMessage interface {
+	ValidateBasic() error
+}
+
+func EncodeConsensusMessages(pb proto.Message) []byte {
+	msg := tmp2p.Message{} // was `msg =` on an undeclared variable
+	switch pb := pb.(type) {
+	case *tmp2p.NewRoundStep:
+		msg.Sum = &tmp2p.Message_NewRoundStep{NewRoundStep: pb}
+	case *tmp2p.NewValidBlock:
+		msg.Sum = &tmp2p.Message_NewValidBlock{NewValidBlock: pb}
+	case *tmp2p.Proposal:
+		msg.Sum = &tmp2p.Message_Proposal{Proposal: pb}
+	case *tmp2p.ProposalPOL:
+		msg.Sum = &tmp2p.Message_ProposalPOL{ProposalPOL: pb}
+	case *tmp2p.BlockPart:
+		msg.Sum = &tmp2p.Message_BlockPart{BlockPart: pb}
+	case *tmp2p.Vote:
+		msg.Sum = &tmp2p.Message_Vote{Vote: pb}
+	case *tmp2p.HasVote:
+		msg.Sum = &tmp2p.Message_HasVote{HasVote: pb} // was &tmp2p.HasVote — must use the Message_ oneof wrapper like siblings
+	case *tmp2p.VoteSetMaj23:
+		msg.Sum = &tmp2p.Message_VoteSetMaj23{VoteSetMaj23: pb}
+	case *tmp2p.VoteSetBits:
+		msg.Sum = &tmp2p.Message_VoteSetBits{VoteSetBits: pb}
+	} // switch was previously left unclosed
+	bz, err := proto.Marshal(&msg)
+	if err != nil {
+		panic(fmt.Errorf("unable to marshal %T: %w", pb, err))
+	}
+	return bz
+}
+
+func DecodeConsensusMessages(bz []byte) (proto.Message, error) {
+	pb := &tmp2p.Message{}
+	err := proto.Unmarshal(bz, pb)
+	if err != nil {
+		return nil, err
+	}
+	switch msg := pb.Sum.(type) {
+	case *tmp2p.Message_NewRoundStep:
+		return msg.NewRoundStep, nil
+	case *tmp2p.Message_NewValidBlock:
+		return msg.NewValidBlock, nil
+	case *tmp2p.Message_Proposal:
+		return msg.Proposal, nil // was msg.proposal — unexported field name
+	case *tmp2p.Message_ProposalPOL:
+		return msg.ProposalPOL, nil
+	case *tmp2p.Message_BlockPart:
+		return msg.BlockPart, nil
+	case *tmp2p.Message_Vote:
+		return msg.Vote, nil
+	case *tmp2p.Message_HasVote: // was *tmp2p.HasVote — Sum holds Message_ wrappers, as sibling cases show
+		return msg.HasVote, nil
+	case *tmp2p.Message_VoteSetMaj23:
+		return msg.VoteSetMaj23, nil
+	case *tmp2p.Message_VoteSetBits:
+		return msg.VoteSetBits, nil
+	default:
+		return nil, fmt.Errorf("unknown message: %T", msg)
+	}
+} // closing brace for the function was missing
+//-------------------------------------
+
+// NewRoundStepMessage is sent for every step taken in the ConsensusState.
+// For every height/round/step transition
+type NewRoundStepMessage struct {
+	Height                int64
+	Round                 int
+	Step                  uint8
+	SecondsSinceStartTime int
+	LastCommitRound       int
+}
+
+// ValidateBasic performs basic validation.
+func (m *NewRoundStepMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	// if !m.Step.IsValid() {
+	// 	return errors.New("Invalid Step")
+	// }
+
+	// NOTE: SecondsSinceStartTime may be negative
+
+	if (m.Height == 1 && m.LastCommitRound != -1) ||
+		(m.Height > 1 && m.LastCommitRound < -1) {
+		return errors.New("Invalid LastCommitRound (for 1st block: -1, for others: >= 0)")
+	}
+	return nil
+}
+
+// String returns a string representation.
+func (m *NewRoundStepMessage) String() string {
+	return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
+		m.Height, m.Round, m.Step, m.LastCommitRound)
+}
+
+//-------------------------------------
+
+// NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
+// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
+// In case the block is also committed, then IsCommit flag is set to true.
+// PartSetHeader identifies a block part set by total part count and hash.
+type PartSetHeader struct {
+	Total int          `json:"total"`
+	Hash  cmn.HexBytes `json:"hash"`
+}
+
+// BitArray is a fixed-size bit vector; Bits is the logical length and
+// Elems is the uint64 backing storage.
+type BitArray struct {
+	mtx   sync.Mutex
+	Bits  int      `json:"bits"`  // NOTE: persisted via reflect, must be exported
+	Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported
+}
+
+// Size returns the number of bits in the bitarray
+// (0 for a nil receiver, so it is safe on an unset *BitArray).
+func (bA *BitArray) Size() int {
+	if bA == nil {
+		return 0
+	}
+	return bA.Bits
+}
+
+// NewValidBlockMessage announces a valid block at Height/Round, identified
+// by BlockPartsHeader, with BlockParts marking which parts are held.
+type NewValidBlockMessage struct {
+	Height           int64
+	Round            int
+	BlockPartsHeader PartSetHeader
+	BlockParts       BitArray
+	IsCommit         bool
+}
+
+// ValidateBasic performs basic validation.
+func (m *NewValidBlockMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	// if err := m.BlockPartsHeader.ValidateBasic(); err != nil {
+	// 	return fmt.Errorf("Wrong BlockPartsHeader: %v", err)
+	// }
+	if m.BlockParts.Size() == 0 {
+		return errors.New("Empty BlockParts")
+	}
+	// the bit array must cover exactly the announced number of parts
+	if m.BlockParts.Size() != m.BlockPartsHeader.Total {
+		return fmt.Errorf("BlockParts bit array size %d not equal to BlockPartsHeader.Total %d",
+			m.BlockParts.Size(),
+			m.BlockPartsHeader.Total)
+	}
+	// if m.BlockParts.Size() > types.MaxBlockPartsCount {
+	// 	return errors.Errorf("BlockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
+	// }
+	return nil
+}
+
+// String returns a string representation.
+func (m *NewValidBlockMessage) String() string {
+	return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]",
+		m.Height, m.Round, m.BlockPartsHeader, m.BlockParts, m.IsCommit)
+}
+
+//-------------------------------------
+
+// Proposal is a signed block proposal for a given height/round.
+type Proposal struct {
+	Type      byte
+	Height    int64     `json:"height"`
+	Round     int       `json:"round"`
+	POLRound  int       `json:"pol_round"` // -1 if null.
+	BlockID   BlockID   `json:"block_id"`
+	Timestamp time.Time `json:"timestamp"`
+	Signature []byte    `json:"signature"`
+}
+
+// BlockID identifies a block by its hash and its part-set header.
+type BlockID struct {
+	Hash        cmn.HexBytes  `json:"hash"`
+	PartsHeader PartSetHeader `json:"parts"`
+}
+
+// ProposalMessage is sent when a new block is proposed.
+type ProposalMessage struct {
+	Proposal Proposal
+}
+
+// ValidateBasic performs basic validation.
+func (m *ProposalMessage) ValidateBasic() error {
+	return nil
+}
+
+// String returns a string representation.
+func (m *ProposalMessage) String() string {
+	return fmt.Sprintf("[Proposal %v]", m.Proposal)
+}
+
+//-------------------------------------
+
+// ProposalPOLMessage is sent when a previous proposal is re-proposed.
+type ProposalPOLMessage struct {
+	Height           int64
+	ProposalPOLRound int
+	ProposalPOL      *cmn.BitArray
+}
+
+// ValidateBasic performs basic validation.
+func (m *ProposalPOLMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.ProposalPOLRound < 0 {
+		return errors.New("Negative ProposalPOLRound")
+	}
+	if m.ProposalPOL.Size() == 0 {
+		return errors.New("Empty ProposalPOL bit array")
+	}
+	// if m.ProposalPOL.Size() > types.MaxVotesCount {
+	// 	return errors.Errorf("ProposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount)
+	// }
+	return nil
+}
+
+// String returns a string representation.
+func (m *ProposalPOLMessage) String() string {
+	return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
+}
+
+//-------------------------------------
+
+// Part is a single piece of a block's part set, with a Merkle proof
+// tying it to the part-set hash.
+type Part struct {
+	Index int                `json:"index"`
+	Bytes cmn.HexBytes       `json:"bytes"`
+	Proof merkle.SimpleProof `json:"proof"`
+
+	// Cache
+	hash []byte
+}
+
+// BlockPartMessage is sent when gossipping a piece of the proposed block.
+type BlockPartMessage struct {
+	Height int64
+	Round  int
+	Part   Part
+}
+
+// ValidateBasic performs basic validation.
+func (m *BlockPartMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	// if err := m.Part.ValidateBasic(); err != nil {
+	// 	return fmt.Errorf("Wrong Part: %v", err)
+	// }
+	return nil
+}
+
+// String returns a string representation.
+func (m *BlockPartMessage) String() string {
+	return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
+}
+
+//-------------------------------------
+
+// Vote represents a prevote, precommit, or commit vote from validators for
+// consensus.
+type Vote struct {
+	Type             byte         `json:"type"`
+	Height           int64        `json:"height"`
+	Round            int          `json:"round"`
+	BlockID          BlockID      `json:"block_id"` // zero if vote is nil.
+	Timestamp        time.Time    `json:"timestamp"`
+	ValidatorAddress cmn.HexBytes `json:"validator_address"`
+	ValidatorIndex   int          `json:"validator_index"`
+	Signature        []byte       `json:"signature"`
+}
+
+// VoteMessage is sent when voting for a proposal (or lack thereof).
+type VoteMessage struct {
+	Vote Vote
+}
+
+// ValidateBasic performs basic validation.
+func (m *VoteMessage) ValidateBasic() error {
+	return nil
+}
+
+// String returns a string representation.
+func (m *VoteMessage) String() string {
+	return fmt.Sprintf("[Vote %v]", m.Vote)
+}
+
+//-------------------------------------
+
+// HasVoteMessage is sent to indicate that a particular vote has been received.
+type HasVoteMessage struct {
+	Height int64
+	Round  int
+	Type   byte
+	Index  int
+}
+
+// ValidateBasic performs basic validation.
+func (m *HasVoteMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	// if !types.IsVoteTypeValid(m.Type) {
+	// 	return errors.New("Invalid Type")
+	// }
+	if m.Index < 0 {
+		return errors.New("Negative Index")
+	}
+	return nil
+}
+
+// String returns a string representation.
+func (m *HasVoteMessage) String() string {
+	return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type)
+}
+
+//-------------------------------------
+
+// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes.
+type VoteSetMaj23Message struct {
+	Height  int64
+	Round   int
+	Type    byte
+	BlockID BlockID
+}
+
+// ValidateBasic performs basic validation.
+func (m *VoteSetMaj23Message) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	// if !types.IsVoteTypeValid(m.Type) {
+	// 	return errors.New("Invalid Type")
+	// }
+	// if err := m.BlockID.ValidateBasic(); err != nil {
+	// 	return fmt.Errorf("Wrong BlockID: %v", err)
+	// }
+	return nil
+}
+
+// String returns a string representation.
+func (m *VoteSetMaj23Message) String() string {
+	return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
+}
+
+//-------------------------------------
+
+// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID.
+type VoteSetBitsMessage struct {
+	Height  int64
+	Round   int
+	Type    byte
+	BlockID BlockID
+	Votes   *cmn.BitArray
+}
+
+// ValidateBasic performs basic validation.
+func (m *VoteSetBitsMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	// if !types.IsVoteTypeValid(m.Type) {
+	// 	return errors.New("Invalid Type")
+	// }
+	// if err := m.BlockID.ValidateBasic(); err != nil {
+	// 	return fmt.Errorf("Wrong BlockID: %v", err)
+	// }
+	// NOTE: Votes.Size() can be zero if the node does not have any
+	// if m.Votes.Size() > types.MaxVotesCount {
+	// 	return fmt.Errorf("Votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount)
+	// }
+	return nil
+}
+
+// String returns a string representation.
+func (m *VoteSetBitsMessage) String() string { + return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) +} + +const ( + channelBc = byte(0x40) //bc.BlockchainChannel, + channelCsSt = byte(0x20) //cs.StateChannel, + channelCsDc = byte(0x21) //cs.DataChannel, + channelCsVo = byte(0x22) //cs.VoteChannel, + channelCsVs = byte(0x23) //cs.VoteSetBitsChannel, + channelMm = byte(0x30) //mempl.MempoolChannel, + channelEv = byte(0x38) //evidence.EvidenceChannel, +) diff --git a/cmd/common.go b/cmd/common.go index 4edf723..88a0430 100755 --- a/cmd/common.go +++ b/cmd/common.go @@ -27,6 +27,7 @@ import ( "github.com/supragya/tendermint_connector/chains" "github.com/supragya/tendermint_connector/chains/irisnet" "github.com/supragya/tendermint_connector/chains/cosmos" + "github.com/supragya/tendermint_connector/chains/tendermint_34" ) var peerPort, rpcPort, marlinPort, listenPortPeer int @@ -85,6 +86,9 @@ func findAndRunDataConnectHandler(node chains.NodeType, case cosmos.ServicedTMCore: log.Info("Attaching Cosmos-3 TM Handler to service given TM core") cosmos.RunDataConnect(peerAddr, marlinTo, marlinFrom, isConnectionOutgoing, keyFile, listenPortPeer) + case tendermint_34.ServicedTMCore: + log.Info("Attaching tendermint(.34) Handler to service given TM core") + tendermint_34.RunDataConnect(peerAddr, marlinTo, marlinFrom, isConnectionOutgoing, keyFile, listenPortPeer) default: log.Error("Cannot find any handler for ", node) return @@ -104,6 +108,9 @@ func findAndRunSpamFilterHandler(node chains.NodeType, case cosmos.ServicedTMCore: log.Info("Attaching Cosmos-3 TM spamfilter") cosmos.RunSpamFilter(rpcAddr, marlinTo, marlinFrom) + case tendermint_34.ServicedTMCore: + log.Info("Attaching tendermint(.34) spamfilter") + tendermint_34.RunSpamFilter(rpcAddr, marlinTo, marlinFrom) default: log.Error("Cannot find any spamfilter for ", node) return diff --git a/cmd/keyfile.go b/cmd/keyfile.go index 4cd258c..8084aad 100755 --- a/cmd/keyfile.go +++ 
b/cmd/keyfile.go @@ -23,6 +23,7 @@ import ( // "github.com/supragya/tendermint_connector/chains" "github.com/supragya/tendermint_connector/chains/irisnet" "github.com/supragya/tendermint_connector/chains/cosmos" + "github.com/supragya/tendermint_connector/chains/tendermint_34" ) // connectCmd represents the connect command @@ -44,6 +45,12 @@ var keyFileCmd = &cobra.Command{ } else { cosmos.VerifyKeyFile(fileLocation) } + case tendermint_34.ServicedKeyFile: + if isGenerate { + tendermint_34.GenerateKeyFile(fileLocation) + } else { + tendermint_34.VerifyKeyFile(fileLocation) + } default: log.Error("Unknown tendermint chain, can't generate or verify for ", chain) }