
megatidy

Loki Verloren · 3 months ago · branch master · commit 8143ef6e7e

100 changed files with 977 additions and 84 deletions
  1. cmd/ctl/config.go (+7, -0)
  2. cmd/ctl/httpclient.go (+4, -0)
  3. cmd/ctl/version.go (+8, -0)
  4. pkg/chain/accept.go (+3, -0)
  5. pkg/chain/bench_test.go (+6, -0)
  6. pkg/chain/blockindex.go (+27, -1)
  7. pkg/chain/chain.go (+44, -1)
  8. pkg/chain/chain_test.go (+11, -0)
  9. pkg/chain/chainio.go (+54, -0)
  10. pkg/chain/chainio_test.go (+14, -0)
  11. pkg/chain/chainview.go (+25, -0)
  12. pkg/chain/chainview_test.go (+35, -21)
  13. pkg/chain/checkpoints.go (+11, -0)
  14. pkg/chain/common_test.go (+14, -0)
  15. pkg/chain/compress.go (+17, -0)
  16. pkg/chain/compress_test.go (+10, -0)
  17. pkg/chain/config/doc.go (+5, -0)
  18. pkg/chain/config/genesis.go (+15, -0)
  19. pkg/chain/config/genesis_test.go (+12, -0)
  20. pkg/chain/config/params-defs.go (+9, -1)
  21. pkg/chain/config/params-mainnet.go (+2, -0)
  22. pkg/chain/config/params-regtest.go (+3, -0)
  23. pkg/chain/config/params-simnet.go (+3, -0)
  24. pkg/chain/config/params-testnet.go (+2, -0)
  25. pkg/chain/config/params.go (+12, -0)
  26. pkg/chain/config/params_test.go (+5, -0)
  27. pkg/chain/config/register_test.go (+6, -0)
  28. pkg/chain/config/util.go (+2, -0)
  29. pkg/chain/difficulty.go (+12, -0)
  30. pkg/chain/difficulty_test.go (+6, -0)
  31. pkg/chain/doc.go (+16, -5)
  32. pkg/chain/error.go (+12, -0)
  33. pkg/chain/error_test.go (+6, -0)
  34. pkg/chain/example_test.go (+7, -0)
  35. pkg/chain/fullblocktests/fullblocks_test.go (+2, -0)
  36. pkg/chain/hash/doc.go (+1, -0)
  37. pkg/chain/hash/hash.go (+12, -0)
  38. pkg/chain/hash/hash_test.go (+7, -0)
  39. pkg/chain/hash/hashfuncs.go (+5, -0)
  40. pkg/chain/hash/hashfuncs_test.go (+5, -0)
  41. pkg/chain/index/addrindex.go (+34, -0)
  42. pkg/chain/index/addrindex_test.go (+12, -0)
  43. pkg/chain/index/blocklogger.go (+5, -0)
  44. pkg/chain/index/cfindex.go (+28, -0)
  45. pkg/chain/index/common.go (+12, -0)
  46. pkg/chain/index/log.go (+3, -0)
  47. pkg/chain/index/manager.go (+20, -1)
  48. pkg/chain/index/txindex.go (+28, -1)
  49. pkg/chain/log.go (+3, -0)
  50. pkg/chain/mediantime.go (+15, -0)
  51. pkg/chain/mediantime_test.go (+4, -0)
  52. pkg/chain/merkle.go (+9, -0)
  53. pkg/chain/merkle_test.go (+5, -0)
  54. pkg/chain/mining/cpu/cpuminer.go (+22, -1)
  55. pkg/chain/mining/cpu/log.go (+3, -0)
  56. pkg/chain/mining/dispatch/controller.go (+13, -4)
  57. pkg/chain/mining/dispatch/kopach.go (+7, -3)
  58. pkg/chain/mining/dispatch/log.go (+2, -0)
  59. pkg/chain/mining/log.go (+3, -0)
  60. pkg/chain/mining/mining.go (+33, -1)
  61. pkg/chain/mining/mining_test.go (+5, -0)
  62. pkg/chain/mining/policy.go (+6, -0)
  63. pkg/chain/mining/policy_test.go (+8, -0)
  64. pkg/chain/notifications.go (+9, -0)
  65. pkg/chain/notifications_test.go (+5, -0)
  66. pkg/chain/process.go (+7, -0)
  67. pkg/chain/scriptval.go (+10, -0)
  68. pkg/chain/scriptval_test.go (+5, -0)
  69. pkg/chain/sync/blocklogger.go (+6, -1)
  70. pkg/chain/sync/doc.go (+2, -2)
  71. pkg/chain/sync/interface.go (+3, -0)
  72. pkg/chain/sync/log.go (+3, -0)
  73. pkg/chain/sync/manager.go (+48, -1)
  74. pkg/chain/thresholdstate.go (+16, -0)
  75. pkg/chain/thresholdstate_test.go (+6, -0)
  76. pkg/chain/timesorter.go (+4, -0)
  77. pkg/chain/timesorter_test.go (+4, -0)
  78. pkg/chain/tx/UnstableAPI.go (+0, -2)
  79. pkg/chain/tx/author/author.go (+0, -2)
  80. pkg/chain/tx/author/author_test.go (+1, -1)
  81. pkg/chain/tx/author/cprng.go (+0, -2)
  82. pkg/chain/tx/chainntfns.go (+5, -0)
  83. pkg/chain/tx/common.go (+13, -6)
  84. pkg/chain/tx/createtx.go (+8, -2)
  85. pkg/chain/tx/disksync.go (+2, -1)
  86. pkg/chain/tx/loader.go (+15, -0)
  87. pkg/chain/tx/log.go (+7, -0)
  88. pkg/chain/tx/mgr/db.go (+0, -3)
  89. pkg/chain/tx/mgr/error.go (+0, -4)
  90. pkg/chain/tx/mgr/example_test.go (+1, -1)
  91. pkg/chain/tx/mgr/kahnsort.go (+0, -2)
  92. pkg/chain/tx/mgr/query.go (+0, -3)
  93. pkg/chain/tx/mgr/query_test.go (+1, -1)
  94. pkg/chain/tx/mgr/tx.go (+0, -4)
  95. pkg/chain/tx/mgr/tx_test.go (+2, -0)
  96. pkg/chain/tx/mgr/unconfirmed.go (+0, -3)
  97. pkg/chain/tx/multisig.go (+4, -2)
  98. pkg/chain/tx/notifications.go (+27, -1)
  99. pkg/chain/tx/recovery.go (+26, -0)
  100. pkg/chain/tx/recovery_test.go (+0, -0)

cmd/ctl/config.go (+7, -0)

@@ -1,15 +1,19 @@
 package ctl
+
 import (
 	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
+
 	"git.parallelcoin.io/dev/9/pkg/pod"
 	"git.parallelcoin.io/dev/9/pkg/rpc/json"
 	"git.parallelcoin.io/dev/9/pkg/util"
 )
+
 // unusableFlags are the command usage flags which this utility are not able to use.  In particular it doesn't support websockets and consequently notifications.
 const unusableFlags = json.UFWebsocketOnly | json.UFNotification
+
 var DefaultConfigFile = filepath.Join(PodCtlHomeDir, "conf.json")
 var DefaultRPCCertFile = filepath.Join(NodeHomeDir, "rpc.cert")
 var DefaultRPCServer = "127.0.0.1:11048"
@@ -18,6 +22,7 @@ var DefaultWalletCertFile = filepath.Join(SPVHomeDir, "rpc.cert")
 var NodeHomeDir = util.AppDataDir("pod", false)
 var PodCtlHomeDir = util.AppDataDir("pod/ctl", false)
 var SPVHomeDir = util.AppDataDir("pod/spv", false)
+
 // ListCommands categorizes and lists all of the usable commands along with their one-line usage.
 func ListCommands() {
 	const (
@@ -63,6 +68,7 @@ func ListCommands() {
 		fmt.Println()
 	}
 }
+
 // cleanAndExpandPath expands environement variables and leading ~ in the passed path, cleans the result, and returns it.
 func cleanAndExpandPath(
 	path string,
@@ -75,6 +81,7 @@ func cleanAndExpandPath(
 	// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%, but they variables can still be expanded via POSIX-style $VARIABLE.
 	return filepath.Clean(os.ExpandEnv(path))
 }
+
 // loadConfig initializes and parses the config using a config file and command line options.
 // The configuration proceeds as follows:
 // 	1) Start with a default config with sane settings
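
A minimal, self-contained sketch of the kind of path handling cleanAndExpandPath performs. Only the os.ExpandEnv/filepath.Clean step appears in the hunk above; the leading-~ expansion and the helper name expandPath are illustrative assumptions.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// expandPath is a hypothetical stand-in for cleanAndExpandPath: it expands a
// leading ~ to the user's home directory, expands POSIX-style $VARIABLE
// references, and cleans the result.
func expandPath(path string) string {
	if path == "~" || strings.HasPrefix(path, "~/") {
		if home, err := os.UserHomeDir(); err == nil {
			path = filepath.Join(home, strings.TrimPrefix(path, "~"))
		}
	}
	// NOTE: os.ExpandEnv does not understand Windows-style %VARIABLE%.
	return filepath.Clean(os.ExpandEnv(path))
}

func main() {
	fmt.Println(expandPath("~/pod/./ctl/../ctl/conf.json"))
}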

cmd/ctl/httpclient.go (+4, -0)

@@ -1,4 +1,5 @@
 package ctl
+
 import (
 	"bytes"
 	"crypto/tls"
@@ -8,10 +9,12 @@ import (
 	"io/ioutil"
 	"net"
 	"net/http"
+
 	"git.parallelcoin.io/dev/9/cmd/nine"
 	"git.parallelcoin.io/dev/9/pkg/rpc/json"
 	"github.com/btcsuite/go-socks/socks"
 )
+
 // newHTTPClient returns a new HTTP client that is configured according to the proxy and TLS settings in the associated connection configuration.
 func newHTTPClient(cfg *nine.Config) (*http.Client, error) {
 	// Configure proxy if needed.
@@ -53,6 +56,7 @@ func newHTTPClient(cfg *nine.Config) (*http.Client, error) {
 	}
 	return &client, nil
 }
+
 // sendPostRequest sends the marshalled JSON-RPC command using HTTP-POST mode to the server described in the passed config struct.  It also attempts to unmarshal the response as a JSON-RPC response and returns either the result field or the error field depending on whether or not there is an error.
 func sendPostRequest(marshalledJSON []byte, cfg *nine.Config) ([]byte, error) {
 	// Generate a request to the configured RPC server.
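
For reference, a rough sketch of the general pattern newHTTPClient follows: build a tls.Config and attach it to an http.Transport. The CA-certificate and skip-verify parameters here are illustrative assumptions rather than the actual nine.Config fields, and the proxy/SOCKS handling is omitted.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
)

// newTLSClient is a hypothetical helper: load an optional CA certificate,
// build a tls.Config, and hang it off an http.Transport.
func newTLSClient(caPEM []byte, skipVerify bool) *http.Client {
	tlsConfig := &tls.Config{InsecureSkipVerify: skipVerify}
	if len(caPEM) > 0 {
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(caPEM)
		tlsConfig.RootCAs = pool
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
	}
}

func main() {
	client := newTLSClient(nil, false)
	_ = client // e.g. client.Post(url, "application/json", body) for an HTTP-POST JSON-RPC call
}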

cmd/ctl/version.go (+8, -0)

@@ -1,21 +1,28 @@
 package ctl
+
 import (
 	"bytes"
 	"fmt"
 	"strings"
 )
+
 // semanticAlphabet
 const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
+
 // These constants define the application version and follow the semantic versioning 2.0.0 spec (http://semver.org/).
 const appMajor uint = 0
 const appMinor uint = 1
 const appPatch uint = 14
+
 // appPreRelease MUST only contain characters from semanticAlphabet per the semantic versioning spec.
 const appPreRelease = "alpha"
+
 // Version is exported so controlling apps can print this information
 var Version = version
+
 // appBuild is defined as a variable so it can be overridden during the build process with '-ldflags "-X main.appBuild foo' if needed.  It MUST only contain characters from semanticAlphabet per the semantic versioning spec.
 var appBuild string
+
 // normalizeVerString returns the passed string stripped of all characters which are not valid according to the semantic versioning guidelines for pre-release version and build metadata strings.  In particular they MUST only contain characters in semanticAlphabet.
 func normalizeVerString(
 	str string,
@@ -29,6 +36,7 @@ func normalizeVerString(
 	}
 	return result.String()
 }
+
 // version returns the application version as a properly formed string per the semantic versioning 2.0.0 spec (http://semver.org/).
 func version() string {
 	// Start with the major, minor, and patch versions.
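
A standalone sketch of how a semantic versioning 2.0.0 string is assembled from constants like the ones above. The separator handling ("-" before the pre-release, "+" before build metadata) follows the semver spec; the exact formatting inside version() is an assumption.

package main

import (
	"fmt"
	"strings"
)

const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"

// normalize drops any character not in semanticAlphabet, mirroring what
// normalizeVerString is documented to do for pre-release and build strings.
func normalize(s string) string {
	var b strings.Builder
	for _, r := range s {
		if strings.ContainsRune(semanticAlphabet, r) {
			b.WriteRune(r)
		}
	}
	return b.String()
}

func main() {
	const appMajor, appMinor, appPatch = 0, 1, 14
	version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
	if pre := normalize("alpha"); pre != "" {
		version += "-" + pre
	}
	if build := normalize("8143ef6e7e"); build != "" { // e.g. a commit id injected via -ldflags
		version += "+" + build
	}
	fmt.Println(version) // 0.1.14-alpha+8143ef6e7e
}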

pkg/chain/accept.go (+3, -0)

@@ -1,9 +1,12 @@
 package chain
+
 import (
 	"fmt"
+
 	database "git.parallelcoin.io/dev/9/pkg/db"
 	"git.parallelcoin.io/dev/9/pkg/util"
 )
+
 // maybeAcceptBlock potentially accepts a block into the block chain and, if accepted, returns whether or not it is on the main chain.  It performs several validation checks which depend on its position within the block chain before adding it.  The block is expected to have already gone through ProcessBlock before calling this function with it. The flags are also passed to checkBlockContext and connectBestChain.  See their documentation for how the flags modify their behavior. This function MUST be called with the chain state lock held (for writes).
 func (b *BlockChain) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) (bool, error) {
 	// The height of this block is one more than the referenced previous block.

pkg/chain/bench_test.go (+6, -0)

@@ -1,8 +1,13 @@
+// +build test
+
 package chain
+
 import (
 	"testing"
+
 	"git.parallelcoin.io/dev/9/pkg/util"
 )
+
 // BenchmarkIsCoinBase performs a simple benchmark against the IsCoinBase function.
 func BenchmarkIsCoinBase(
 	b *testing.B) {
@@ -12,6 +17,7 @@ func BenchmarkIsCoinBase(
 		IsCoinBase(tx)
 	}
 }
+
 // BenchmarkIsCoinBaseTx performs a simple benchmark against the IsCoinBaseTx function.
 func BenchmarkIsCoinBaseTx(
 	b *testing.B) {
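
The hunk above gates the benchmark file behind a "test" build tag. As a generic illustration (the benchmark name and body here are placeholders, not from this repository), a tag-gated benchmark has this shape and only compiles when built with go test -tags test -bench .:

// +build test

package chain

import "testing"

// BenchmarkExample shows the standard testing.B loop used by the benchmarks
// in this file; the loop body is a stand-in for the call being measured,
// e.g. IsCoinBase(tx).
func BenchmarkExample(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = i * i
	}
}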

pkg/chain/blockindex.go (+27, -1)

@@ -1,18 +1,22 @@
 package chain
+
 import (
 	"math/big"
 	"sort"
 	"sync"
 	"time"
-	"git.parallelcoin.io/dev/9/pkg/chain/fork"
+
 	chaincfg "git.parallelcoin.io/dev/9/pkg/chain/config"
+	"git.parallelcoin.io/dev/9/pkg/chain/fork"
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
 	database "git.parallelcoin.io/dev/9/pkg/db"
 	cl "git.parallelcoin.io/dev/9/pkg/util/cl"
 )
+
 // blockStatus is a bit field representing the validation state of the block.
 type blockStatus byte
+
 const (
 	// statusDataStored indicates that the block's payload is stored on disk.
 	statusDataStored blockStatus = 1 << iota
@@ -28,18 +32,22 @@ const (
 	// NOTE: This must be defined last in order to avoid influencing iota.
 	statusNone blockStatus = 0
 )
+
 // HaveData returns whether the full block data is stored in the database. This will return false for a block node where only the header is downloaded or kept.
 func (status blockStatus) HaveData() bool {
 	return status&statusDataStored != 0
 }
+
 // KnownValid returns whether the block is known to be valid. This will return false for a valid block that has not been fully validated yet.
 func (status blockStatus) KnownValid() bool {
 	return status&statusValid != 0
 }
+
 // KnownInvalid returns whether the block is known to be invalid. This may be because the block itself failed validation or any of its ancestors is invalid. This will return false for invalid blocks that have not been proven invalid yet.
 func (status blockStatus) KnownInvalid() bool {
 	return status&(statusValidateFailed|statusInvalidAncestor) != 0
 }
+
 // blockNode represents a block within the block chain and is primarily used to aid in selecting the best chain to be the main chain.  The main chain is stored into the block database.
 type blockNode struct {
 	// NOTE: Additions, deletions, or modifications to the order of the definitions in this struct should not be changed without considering how it affects alignment on 64-bit platforms.  The current order is specifically crafted to result in minimal padding.  There will be hundreds of thousands of these in memory, so a few extra bytes of padding adds up.
@@ -60,6 +68,7 @@ type blockNode struct {
 	// status is a bitfield representing the validation state of the block. The status field, unlike the other fields, may be written to and so should only be accessed using the concurrent-safe NodeStatus method on blockIndex once the node has been added to the global index.
 	status blockStatus
 }
+
 // initBlockNode initializes a block node from the given header and parent node, calculating the height and workSum from the respective fields on the parent. This function is NOT safe for concurrent access.  It must only be called when initially creating a node.
 func initBlockNode(
 	node *blockNode, blockHeader *wire.BlockHeader, parent *blockNode) {
@@ -79,6 +88,7 @@ func initBlockNode(
 		node.workSum = node.workSum.Add(parent.workSum, node.workSum)
 	}
 }
+
 // newBlockNode returns a new block node for the given block header and parent node, calculating the height and workSum from the respective fields on the parent. This function is NOT safe for concurrent access.
 func newBlockNode(
 	blockHeader *wire.BlockHeader, parent *blockNode) *blockNode {
@@ -86,6 +96,7 @@ func newBlockNode(
 	initBlockNode(&node, blockHeader, parent)
 	return &node
 }
+
 // Header constructs a block header from the node and returns it. This function is safe for concurrent access.
 func (node *blockNode) Header() wire.BlockHeader {
 	// No lock is needed because all accessed fields are immutable.
@@ -102,6 +113,7 @@ func (node *blockNode) Header() wire.BlockHeader {
 		Nonce:      node.nonce,
 	}
 }
+
 // Ancestor returns the ancestor block node at the provided height by following the chain backwards from this node.  The returned block will be nil when a height is requested that is after the height of the passed node or is less than zero. This function is safe for concurrent access.
 func (node *blockNode) Ancestor(height int32) *blockNode {
 	if height < 0 || height > node.height {
@@ -113,11 +125,13 @@ func (node *blockNode) Ancestor(height int32) *blockNode {
 	}
 	return n
 }
+
 // RelativeAncestor returns the ancestor block node a relative 'distance' blocks before this node.  This is equivalent to calling Ancestor with the node's height minus provided distance.
 // This function is safe for concurrent access.
 func (node *blockNode) RelativeAncestor(distance int32) *blockNode {
 	return node.Ancestor(node.height - distance)
 }
+
 // CalcPastMedianTime calculates the median time of the previous few blocks prior to, and including, the block node. This function is safe for concurrent access.
 func (node *blockNode) CalcPastMedianTime() time.Time {
 	// Create a slice of the previous few block timestamps used to calculate the median per the number defined by the constant medianTimeBlocks.
@@ -137,6 +151,7 @@ func (node *blockNode) CalcPastMedianTime() time.Time {
 	medianTimestamp := timestamps[numNodes/2]
 	return time.Unix(medianTimestamp, 0)
 }
+
 // blockIndex provides facilities for keeping track of an in-memory index of the block chain.  Although the name block chain suggests a single chain of blocks, it is actually a tree-shaped structure where any node can have multiple children.  However, there can only be one active branch which does indeed form a chain from the tip all the way back to the genesis block.
 type blockIndex struct {
 	// The following fields are set when the instance is created and can't be changed afterwards, so there is no need to protect them with a separate mutex.
@@ -146,6 +161,7 @@ type blockIndex struct {
 	index map[chainhash.Hash]*blockNode
 	dirty map[*blockNode]struct{}
 }
+
 // newBlockIndex returns a new empty instance of a block index.  The index will be dynamically populated as block nodes are loaded from the database and manually added.
 func newBlockIndex(
 	db database.DB, chainParams *chaincfg.Params) *blockIndex {
@@ -156,6 +172,7 @@ func newBlockIndex(
 		dirty:       make(map[*blockNode]struct{}),
 	}
 }
+
 // HaveBlock returns whether or not the block index contains the provided hash. This function is safe for concurrent access.
 func (bi *blockIndex) HaveBlock(hash *chainhash.Hash) bool {
 	bi.RLock()
@@ -163,6 +180,7 @@ func (bi *blockIndex) HaveBlock(hash *chainhash.Hash) bool {
 	bi.RUnlock()
 	return hasBlock
 }
+
 // LookupNode returns the block node identified by the provided hash.  It will return nil if there is no entry for the hash. This function is safe for concurrent access.
 func (bi *blockIndex) LookupNode(hash *chainhash.Hash) *blockNode {
 	bi.RLock()
@@ -170,6 +188,7 @@ func (bi *blockIndex) LookupNode(hash *chainhash.Hash) *blockNode {
 	bi.RUnlock()
 	return node
 }
+
 // AddNode adds the provided node to the block index and marks it as dirty. Duplicate entries are not checked so it is up to caller to avoid adding them. This function is safe for concurrent access.
 func (bi *blockIndex) AddNode(node *blockNode) {
 	bi.Lock()
@@ -177,10 +196,12 @@ func (bi *blockIndex) AddNode(node *blockNode) {
 	bi.dirty[node] = struct{}{}
 	bi.Unlock()
 }
+
 // addNode adds the provided node to the block index, but does not mark it as dirty. This can be used while initializing the block index. This function is NOT safe for concurrent access.
 func (bi *blockIndex) addNode(node *blockNode) {
 	bi.index[node.hash] = node
 }
+
 // NodeStatus provides concurrent-safe access to the status field of a node. This function is safe for concurrent access.
 func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
 	bi.RLock()
@@ -188,6 +209,7 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
 	bi.RUnlock()
 	return status
 }
+
 // SetStatusFlags flips the provided status flags on the block node to on, regardless of whether they were on or off previously. This does not unset any flags currently on. This function is safe for concurrent access.
 func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
 	bi.Lock()
@@ -195,6 +217,7 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
 	bi.dirty[node] = struct{}{}
 	bi.Unlock()
 }
+
 // UnsetStatusFlags flips the provided status flags on the block node to off, regardless of whether they were on or off previously. This function is safe for concurrent access.
 func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
 	bi.Lock()
@@ -202,6 +225,7 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
 	bi.dirty[node] = struct{}{}
 	bi.Unlock()
 }
+
 // flushToDB writes all dirty block nodes to the database. If all writes succeed, this clears the dirty set.
 func (bi *blockIndex) flushToDB() error {
 	bi.Lock()
@@ -228,10 +252,12 @@ func (bi *blockIndex) flushToDB() error {
 	bi.Unlock()
 	return err
 }
+
 // GetAlgo returns the algorithm of a block node
 func (node *blockNode) GetAlgo() int32 {
 	return node.version
 }
+
 // GetLastWithAlgo returns the newest block from node with specified algo
 func (node *blockNode) GetLastWithAlgo(algo int32) (prev *blockNode) {
 	if prev == nil {
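
The blockStatus type above is a plain bit field. A minimal standalone sketch of the same pattern, using the flag and method names from the hunk (the bit ordering of the intermediate flags is an assumption):

package main

import "fmt"

type blockStatus byte

const (
	statusDataStored blockStatus = 1 << iota // block payload is stored on disk
	statusValid                              // block has been fully validated
	statusValidateFailed                     // block failed validation
	statusInvalidAncestor                    // an ancestor of the block failed validation
	// statusNone is defined last so it does not influence iota.
	statusNone blockStatus = 0
)

// HaveData reports whether the full block data is stored.
func (s blockStatus) HaveData() bool { return s&statusDataStored != 0 }

// KnownValid reports whether the block is known to be valid.
func (s blockStatus) KnownValid() bool { return s&statusValid != 0 }

// KnownInvalid reports whether the block or any of its ancestors is known to be invalid.
func (s blockStatus) KnownInvalid() bool {
	return s&(statusValidateFailed|statusInvalidAncestor) != 0
}

func main() {
	s := statusDataStored | statusValid
	fmt.Println(s.HaveData(), s.KnownValid(), s.KnownInvalid()) // true true false
}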

pkg/chain/chain.go (+44, -1)

@@ -1,32 +1,38 @@
 package chain
+
 import (
 	"container/list"
 	"fmt"
 	"sync"
 	"time"
-	cl "git.parallelcoin.io/dev/9/pkg/util/cl"
+
 	chaincfg "git.parallelcoin.io/dev/9/pkg/chain/config"
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
 	txscript "git.parallelcoin.io/dev/9/pkg/chain/tx/script"
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
 	database "git.parallelcoin.io/dev/9/pkg/db"
 	"git.parallelcoin.io/dev/9/pkg/util"
+	cl "git.parallelcoin.io/dev/9/pkg/util/cl"
 )
+
 const (
 	// maxOrphanBlocks is the maximum number of orphan blocks that can be queued.
 	maxOrphanBlocks = 100
 )
+
 // BlockLocator is used to help locate a specific block.  The algorithm for building the block locator is to add the hashes in reverse order until the genesis block is reached.  In order to keep the list of locator hashes to a reasonable number of entries, first the most recent previous 12 block hashes are added, then the step is doubled each loop iteration to exponentially decrease the number of hashes as a function of the distance from the block being located. For example, assume a block chain with a side chain as depicted below:
 // 	genesis -> 1 -> 2 -> ... -> 15 -> 16  -> 17  -> 18
 // 	                              \-> 16a -> 17a
 // The block locator for block 17a would be the hashes of blocks:
 // [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis]
 type BlockLocator []*chainhash.Hash
+
 // orphanBlock represents a block that we don't yet have the parent for.  It is a normal block plus an expiration time to prevent caching the orphan forever.
 type orphanBlock struct {
 	block      *util.Block
 	expiration time.Time
 }
+
 // BestState houses information about the current best block and other info related to the state of the main chain as it exists from the point of view of the current best block. The BestSnapshot method can be used to obtain access to this information in a concurrent safe manner and the data will not be changed out from under the caller when chain state changes occur as the function name implies. However, the returned snapshot must be treated as immutable since it is shared by all callers.
 type BestState struct {
 	Hash        chainhash.Hash // The hash of the block.
@@ -39,6 +45,7 @@ type BestState struct {
 	TotalTxns   uint64    // The total number of txns in the chain.
 	MedianTime  time.Time // Median time as per CalcPastMedianTime.
 }
+
 // newBestState returns a new best stats instance for the given parameters.
 func newBestState(
 	node *blockNode, blockSize, blockWeight, numTxns,
@@ -55,6 +62,7 @@ func newBestState(
 		MedianTime:  medianTime,
 	}
 }
+
 // BlockChain provides functions for working with the bitcoin block chain. It includes functionality such as rejecting duplicate blocks, ensuring blocks follow all rules, orphan handling, checkpoint handling, and best chain selection with reorganization.
 type BlockChain struct {
 	// The following fields are set when the instance is created and can't be changed afterwards, so there is no need to protect them with a separate mutex.
@@ -106,6 +114,7 @@ type BlockChain struct {
 	// DifficultyAdjustments keeps track of the latest difficulty adjustment for each algorithm
 	DifficultyAdjustments map[string]float64
 }
+
 // HaveBlock returns whether or not the chain instance has the block represented by the passed hash.  This includes checking the various places a block can be like part of the main chain, on a side chain, or in the orphan pool. This function is safe for concurrent access.
 func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) {
 	exists, err := b.blockExists(hash)
@@ -114,6 +123,7 @@ func (b *BlockChain) HaveBlock(hash *chainhash.Hash) (bool, error) {
 	}
 	return exists || b.IsKnownOrphan(hash), nil
 }
+
 // IsKnownOrphan returns whether the passed hash is currently a known orphan. Keep in mind that only a limited number of orphans are held onto for a limited amount of time, so this function must not be used as an absolute way to test if a block is an orphan block.  A full block (as opposed to just its hash) must be passed to ProcessBlock for that purpose.  However, calling ProcessBlock with an orphan that already exists results in an error, so this function provides a mechanism for a caller to intelligently detect *recent* duplicate orphans and react accordingly. This function is safe for concurrent access.
 func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool {
 	// Protect concurrent access.  Using a read lock only so multiple readers can query without blocking each other.
@@ -122,6 +132,7 @@ func (b *BlockChain) IsKnownOrphan(hash *chainhash.Hash) bool {
 	b.orphanLock.RUnlock()
 	return exists
 }
+
 // GetOrphanRoot returns the head of the chain for the provided hash from the map of orphan blocks. This function is safe for concurrent access.
 func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash {
 	// Protect concurrent access.  Using a read lock only so multiple readers can query without blocking each other.
@@ -140,6 +151,7 @@ func (b *BlockChain) GetOrphanRoot(hash *chainhash.Hash) *chainhash.Hash {
 	}
 	return orphanRoot
 }
+
 // removeOrphanBlock removes the passed orphan block from the orphan pool and previous orphan index.
 func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) {
 	// Protect concurrent access.
@@ -166,6 +178,7 @@ func (b *BlockChain) removeOrphanBlock(orphan *orphanBlock) {
 		delete(b.prevOrphans, *prevHash)
 	}
 }
+
 // addOrphanBlock adds the passed block (which is already determined to be an orphan prior calling this function) to the orphan pool.  It lazily cleans up any expired blocks so a separate cleanup poller doesn't need to be run. It also imposes a maximum limit on the number of outstanding orphan blocks and will remove the oldest received orphan block if the limit is exceeded.
 func (b *BlockChain) addOrphanBlock(block *util.Block) {
 	// Remove expired orphan blocks.
@@ -199,17 +212,20 @@ func (b *BlockChain) addOrphanBlock(block *util.Block) {
 	prevHash := &block.MsgBlock().Header.PrevBlock
 	b.prevOrphans[*prevHash] = append(b.prevOrphans[*prevHash], oBlock)
 }
+
 // SequenceLock represents the converted relative lock-time in seconds, and absolute block-height for a transaction input's relative lock-times. According to SequenceLock, after the referenced input has been confirmed within a block, a transaction spending that input can be included into a block either after 'seconds' (according to past median time), or once the 'BlockHeight' has been reached.
 type SequenceLock struct {
 	Seconds     int64
 	BlockHeight int32
 }
+
 // CalcSequenceLock computes a relative lock-time SequenceLock for the passed transaction using the passed UtxoViewpoint to obtain the past median time for blocks in which the referenced inputs of the transactions were included within. The generated SequenceLock lock can be used in conjunction with a block height, and adjusted median block time to determine if all the inputs referenced within a transaction have reached sufficient maturity allowing the candidate transaction to be included in a block. This function is safe for concurrent access.
 func (b *BlockChain) CalcSequenceLock(tx *util.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) {
 	b.chainLock.Lock()
 	defer b.chainLock.Unlock()
 	return b.calcSequenceLock(b.bestChain.Tip(), tx, utxoView, mempool)
 }
+
 // calcSequenceLock computes the relative lock-times for the passed transaction. See the exported version, CalcSequenceLock for further details. This function MUST be called with the chain state lock held (for writes).
 func (b *BlockChain) calcSequenceLock(node *blockNode, tx *util.Tx, utxoView *UtxoViewpoint, mempool bool) (*SequenceLock, error) {
 	// A value of -1 for each relative lock type represents a relative time lock value that will allow a transaction to be included in a block at any given height or time. This value is returned as the relative lock time in the case that BIP 68 is disabled, or has not yet been activated.
@@ -278,6 +294,7 @@ func (b *BlockChain) calcSequenceLock(node *blockNode, tx *util.Tx, utxoView *Ut
 	}
 	return sequenceLock, nil
 }
+
 // LockTimeToSequence converts the passed relative locktime to a sequence number in accordance to BIP-68.
 // See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki
 //  * (Compatibility)
@@ -291,6 +308,7 @@ func LockTimeToSequence(
 	return wire.SequenceLockTimeIsSeconds |
 		locktime>>wire.SequenceLockTimeGranularity
 }
+
 // getReorganizeNodes finds the fork point between the main chain and the passed node and returns a list of block nodes that would need to be detached from the main chain and a list of block nodes that would need to be attached to the fork point (which will be the end of the main chain after detaching the returned list of block nodes) in order to reorganize the chain such that the passed node is the new end of the main chain.  The lists will be empty if the passed node is not on a side chain. This function may modify node statuses in the block index without flushing. This function MUST be called with the chain state lock held (for reads).
 func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List) {
 	attachNodes := list.New()
@@ -327,6 +345,7 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List
 	}
 	return detachNodes, attachNodes
 }
+
 // connectBlock handles connecting the passed node/block to the end of the main (best) chain.
 // This passed utxo view must have all referenced txos the block spends marked as spent and all of the new txos the block creates added to it.  In addition, the passed stxos slice must be populated with all of the information for the spent txos.  This approach is used because the connection validation that must happen prior to calling this function requires the same details, so it would be inefficient to repeat it. This function MUST be called with the chain state lock held (for writes).
 func (b *BlockChain) connectBlock(node *blockNode, block *util.Block,
@@ -422,6 +441,7 @@ func (b *BlockChain) connectBlock(node *blockNode, block *util.Block,
 	b.chainLock.Lock()
 	return nil
 }
+
 // disconnectBlock handles disconnecting the passed node/block from the end of the main (best) chain. This function MUST be called with the chain state lock held (for writes).
 func (b *BlockChain) disconnectBlock(node *blockNode, block *util.Block, view *UtxoViewpoint) error {
 	// Make sure the node being disconnected is the end of the best chain.
@@ -508,6 +528,7 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *util.Block, view *U
 	b.chainLock.Lock()
 	return nil
 }
+
 // countSpentOutputs returns the number of utxos the passed block spends.
 func countSpentOutputs(
 	block *util.Block) int {
@@ -518,6 +539,7 @@ func countSpentOutputs(
 	}
 	return numSpent
 }
+
 // reorganizeChain reorganizes the block chain by disconnecting the nodes in the detachNodes list and connecting the nodes in the attach list.  It expects that the lists are already in the correct order and are in sync with the end of the current best chain.  Specifically, nodes that are being disconnected must be in reverse order (think of popping them off the end of the chain) and nodes the are being attached must be in forwards order (think pushing them onto the end of the chain). This function may modify node statuses in the block index without flushing. This function MUST be called with the chain state lock held (for writes).
 func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error {
 	// Nothing to do if no reorganize nodes were provided.
@@ -711,6 +733,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error
 	}
 	return nil
 }
+
 // connectBestChain handles connecting the passed block to the chain while respecting proper chain selection according to the chain with the most proof of work.  In the typical case, the new block simply extends the main chain.  However, it may also be extending (or creating) a side chain (fork) which may or may not end up becoming the main chain depending on which fork cumulatively has the most proof of work.  It returns whether or not the block ended up on the main chain (either due to extending the main chain or causing a reorganization to become the main chain). The flags modify the behavior of this function as follows:
 //  - BFFastAdd: Avoids several expensive transaction validation operations.
 //    This is useful when using checkpoints.
@@ -825,6 +848,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *util.Block, flags
 	}
 	return err == nil, err
 }
+
 // isCurrent returns whether or not the chain believes it is current.  Severalfactors are used to guess, but the key factors that allow the chain to believe it is current are:
 //  - Latest block height is after the latest checkpoint (if enabled)
 //  - Latest block has a timestamp newer than 24 hours ago
@@ -841,6 +865,7 @@ func (b *BlockChain) isCurrent() bool {
 	minus24Hours := b.timeSource.AdjustedTime().Add(-24 * time.Hour).Unix()
 	return b.bestChain.Tip().timestamp >= minus24Hours
 }
+
 // IsCurrent returns whether or not the chain believes it is current.  Several factors are used to guess, but the key factors that allow the chain to believe it is current are:
 //  - Latest block height is after the latest checkpoint (if enabled)
 //  - Latest block has a timestamp newer than 24 hours ago
@@ -850,6 +875,7 @@ func (b *BlockChain) IsCurrent() bool {
 	defer b.chainLock.RUnlock()
 	return b.isCurrent()
 }
+
 // BestSnapshot returns information about the current best chain block and related state as of the current point in time.  The returned instance must be treated as immutable since it is shared by all callers. This function is safe for concurrent access.
 func (b *BlockChain) BestSnapshot() *BestState {
 	b.stateLock.RLock()
@@ -857,6 +883,7 @@ func (b *BlockChain) BestSnapshot() *BestState {
 	b.stateLock.RUnlock()
 	return snapshot
 }
+
 // HeaderByHash returns the block header identified by the given hash or an error if it doesn't exist. Note that this will return headers from both the main and side chains.
 func (b *BlockChain) HeaderByHash(hash *chainhash.Hash) (wire.BlockHeader, error) {
 	node := b.Index.LookupNode(hash)
@@ -866,11 +893,13 @@ func (b *BlockChain) HeaderByHash(hash *chainhash.Hash) (wire.BlockHeader, error
 	}
 	return node.Header(), nil
 }
+
 // MainChainHasBlock returns whether or not the block with the given hash is in the main chain. This function is safe for concurrent access.
 func (b *BlockChain) MainChainHasBlock(hash *chainhash.Hash) bool {
 	node := b.Index.LookupNode(hash)
 	return node != nil && b.bestChain.Contains(node)
 }
+
 // BlockLocatorFromHash returns a block locator for the passed block hash. See BlockLocator for details on the algorithm used to create a block locator. In addition to the general algorithm referenced above, this function will return the block locator for the latest known tip of the main (best) chain if the passed hash is not currently known. This function is safe for concurrent access.
 func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
 	b.chainLock.RLock()
@@ -879,6 +908,7 @@ func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
 	b.chainLock.RUnlock()
 	return locator
 }
+
 // LatestBlockLocator returns a block locator for the latest known tip of the main (best) chain. This function is safe for concurrent access.
 func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) {
 	b.chainLock.RLock()
@@ -886,6 +916,7 @@ func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) {
 	b.chainLock.RUnlock()
 	return locator, nil
 }
+
 // BlockHeightByHash returns the height of the block with the given hash in the main chain. This function is safe for concurrent access.
 func (b *BlockChain) BlockHeightByHash(hash *chainhash.Hash) (int32, error) {
 	node := b.Index.LookupNode(hash)
@@ -895,6 +926,7 @@ func (b *BlockChain) BlockHeightByHash(hash *chainhash.Hash) (int32, error) {
 	}
 	return node.height, nil
 }
+
 // BlockHashByHeight returns the hash of the block at the given height in the main chain. This function is safe for concurrent access.
 func (b *BlockChain) BlockHashByHeight(blockHeight int32) (*chainhash.Hash, error) {
 	node := b.bestChain.NodeByHeight(blockHeight)
@@ -904,6 +936,7 @@ func (b *BlockChain) BlockHashByHeight(blockHeight int32) (*chainhash.Hash, erro
 	}
 	return &node.hash, nil
 }
+
 // HeightRange returns a range of block hashes for the given start and end heights.  It is inclusive of the start height and exclusive of the end height.  The end height will be limited to the current main chain height. This function is safe for concurrent access.
 func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash, error) {
 	// Ensure requested heights are sane.
@@ -937,6 +970,7 @@ func (b *BlockChain) HeightRange(startHeight, endHeight int32) ([]chainhash.Hash
 	}
 	return hashes, nil
 }
+
 // HeightToHashRange returns a range of block hashes for the given start height and end hash, inclusive on both ends.  The hashes are for all blocks that are ancestors of endHash with height greater than or equal to startHeight.  The end hash must belong to a block that is known to be valid. This function is safe for concurrent access.
 func (b *BlockChain) HeightToHashRange(startHeight int32,
 	endHash *chainhash.Hash, maxResults int) ([]chainhash.Hash, error) {
@@ -969,6 +1003,7 @@ func (b *BlockChain) HeightToHashRange(startHeight int32,
 	}
 	return hashes, nil
 }
+
 // IntervalBlockHashes returns hashes for all blocks that are ancestors of endHash where the block height is a positive multiple of interval. This function is safe for concurrent access.
 func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int,
 ) ([]chainhash.Hash, error) {
@@ -997,6 +1032,7 @@ func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int,
 	}
 	return hashes, nil
 }
+
 // locateInventory returns the node of the block after the first known block in the locator along with the number of subsequent nodes needed to either reach the provided stop hash or the provided max number of entries. In addition, there are two special cases:
 // - When no locators are provided, the stop hash is treated as a request for that block, so it will either return the node associated with the stop hash if it is known, or nil if it is unknown
 // - When locators are provided, but none of them are known, nodes starting after the genesis block will be returned
@@ -1036,6 +1072,7 @@ func (b *BlockChain) locateInventory(locator BlockLocator, hashStop *chainhash.H
 	}
 	return startNode, total
 }
+
 // locateBlocks returns the hashes of the blocks after the first known block in the locator until the provided stop hash is reached, or up to the provided max number of block hashes. See the comment on the exported function for more details on special cases. This function MUST be called with the chain state lock held (for reads).
 func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash {
 	// Find the node after the first known block in the locator and the total number of nodes after it needed while respecting the stop hash and max entries.
@@ -1051,6 +1088,7 @@ func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash
 	}
 	return hashes
 }
+
 // LocateBlocks returns the hashes of the blocks after the first known block in the locator until the provided stop hash is reached, or up to the provided max number of block hashes.
 // In addition, there are two special cases:
 // - When no locators are provided, the stop hash is treated as a request for that block, so it will either return the stop hash itself if it is known, or nil if it is unknown
@@ -1062,6 +1100,7 @@ func (b *BlockChain) LocateBlocks(locator BlockLocator, hashStop *chainhash.Hash
 	b.chainLock.RUnlock()
 	return hashes
 }
+
 // locateHeaders returns the headers of the blocks after the first known block in the locator until the provided stop hash is reached, or up to the provided max number of block headers. See the comment on the exported function for more details on special cases. This function MUST be called with the chain state lock held (for reads).
 func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Hash, maxHeaders uint32) []wire.BlockHeader {
 	// Find the node after the first known block in the locator and the total number of nodes after it needed while respecting the stop hash and max entries.
@@ -1077,6 +1116,7 @@ func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Has
 	}
 	return headers
 }
+
 // LocateHeaders returns the headers of the blocks after the first known block in the locator until the provided stop hash is reached, or up to a max of wire.MaxBlockHeadersPerMsg headers.
 // In addition, there are two special cases:
 // - When no locators are provided, the stop hash is treated as a request for that header, so it will either return the header for the stop hash itself if it is known, or nil if it is unknown
@@ -1088,6 +1128,7 @@ func (b *BlockChain) LocateHeaders(locator BlockLocator, hashStop *chainhash.Has
 	b.chainLock.RUnlock()
 	return headers
 }
+
 // IndexManager provides a generic interface that the is called when blocks are connected and disconnected to and from the tip of the main chain for the purpose of supporting optional indexes.
 type IndexManager interface {
 	// Init is invoked during chain initialize in order to allow the index manager to initialize itself and any indexes it is managing.  The channel parameter specifies a channel the caller can close to signal that the process should be interrupted.  It can be nil if that behavior is not desired.
@@ -1097,6 +1138,7 @@ type IndexManager interface {
 	// DisconnectBlock is invoked when a block has been disconnected from the main chain. The set of outputs scripts that were spent within this block is also returned so indexers can clean up the prior index state for this block.
 	DisconnectBlock(database.Tx, *util.Block, []SpentTxOut) error
 }
+
 // Config is a descriptor which specifies the blockchain instance configuration.
 type Config struct {
 	// DB defines the database which houses the blocks and will be used to store all metadata created by this package such as the utxo set. This field is required.
@@ -1116,6 +1158,7 @@ type Config struct {
 	// HashCache defines a transaction hash mid-state cache to use when validating transactions. This cache has the potential to greatly speed up transaction validation as re-using the pre-calculated mid-state eliminates the O(N^2) validation complexity due to the SigHashAll flag. This field can be nil if the caller is not interested in using a signature cache.
 	HashCache *txscript.HashCache
 }
+
 // New returns a BlockChain instance using the provided configuration details.
 func New(
 	config *Config) (*BlockChain, error) {
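
LockTimeToSequence in the hunk above encodes a relative lock-time per BIP-68. A standalone sketch with the standard BIP-68 constants written out (the values 1<<22 and 9 are the spec values and are assumed to match wire.SequenceLockTimeIsSeconds and wire.SequenceLockTimeGranularity; the function signature is inferred from the hunk):

package main

import "fmt"

const (
	// sequenceLockTimeIsSeconds sets the BIP-68 type flag (bit 22), marking the
	// lock-time as 512-second granules rather than blocks.
	sequenceLockTimeIsSeconds uint32 = 1 << 22
	// sequenceLockTimeGranularity is the BIP-68 time shift: 2^9 = 512 seconds per granule.
	sequenceLockTimeGranularity = 9
)

// lockTimeToSequence converts a relative lock-time to a sequence number:
// block-based locks pass through unchanged, second-based locks are converted
// to 512-second granules and tagged with the type flag.
func lockTimeToSequence(isSeconds bool, locktime uint32) uint32 {
	if !isSeconds {
		return locktime
	}
	return sequenceLockTimeIsSeconds | locktime>>sequenceLockTimeGranularity
}

func main() {
	fmt.Println(lockTimeToSequence(false, 10))  // 10: wait 10 blocks
	fmt.Println(lockTimeToSequence(true, 1024)) // bit 22 set, value 2: wait 2*512 seconds
}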

pkg/chain/chain_test.go (+11, -0)

@@ -1,13 +1,18 @@
+// +build test
+
 package chain
+
 import (
 	"reflect"
 	"testing"
 	"time"
+
 	chaincfg "git.parallelcoin.io/dev/9/pkg/chain/config"
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
 	"git.parallelcoin.io/dev/9/pkg/util"
 )
+
 // TestHaveBlock tests the HaveBlock API to ensure proper functionality.
 func TestHaveBlock(
 	t *testing.T) {
@@ -92,6 +97,7 @@ func TestHaveBlock(
 		}
 	}
 }
+
 // TestCalcSequenceLock tests the LockTimeToSequence function, and the CalcSequenceLock method of a Chain instance. The tests exercise several combinations of inputs to the CalcSequenceLock function in order to ensure the returned SequenceLocks are correct for each test instance.
 func TestCalcSequenceLock(
 	t *testing.T) {
@@ -364,6 +370,7 @@ func TestCalcSequenceLock(
 		}
 	}
 }
+
 // nodeHashes is a convenience function that returns the hashes for all of the passed indexes of the provided nodes.  It is used to construct expected hash slices in the tests.
 func nodeHashes(
 	nodes []*blockNode, indexes ...int) []chainhash.Hash {
@@ -373,6 +380,7 @@ func nodeHashes(
 	}
 	return hashes
 }
+
 // nodeHeaders is a convenience function that returns the headers for all of the passed indexes of the provided nodes.  It is used to construct expected located headers in the tests.
 func nodeHeaders(
 	nodes []*blockNode, indexes ...int) []wire.BlockHeader {
@@ -382,6 +390,7 @@ func nodeHeaders(
 	}
 	return headers
 }
+
 // TestLocateInventory ensures that locating inventory via the LocateHeaders and LocateBlocks functions behaves as expected.
 func TestLocateInventory(
 	t *testing.T) {
@@ -637,6 +646,7 @@ func TestLocateInventory(
 		}
 	}
 }
+
 // TestHeightToHashRange ensures that fetching a range of block hashes by start height and end hash works as expected.
 func TestHeightToHashRange(
 	t *testing.T) {
@@ -729,6 +739,7 @@ func TestHeightToHashRange(
 		}
 	}
 }
+
 // TestIntervalBlockHashes ensures that fetching block hashes at specified intervals by end hash works as expected.
 func TestIntervalBlockHashes(
 	t *testing.T) {

+ 54
- 0
pkg/chain/chainio.go View File

@@ -1,4 +1,5 @@
1 1
 package chain
2
+
2 3
 import (
3 4
 	"bytes"
4 5
 	"encoding/binary"
@@ -7,12 +8,14 @@ import (
7 8
 	"math/big"
8 9
 	"sync"
9 10
 	"time"
11
+
10 12
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
11 13
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
12 14
 	database "git.parallelcoin.io/dev/9/pkg/db"
13 15
 	"git.parallelcoin.io/dev/9/pkg/util"
14 16
 	cl "git.parallelcoin.io/dev/9/pkg/util/cl"
15 17
 )
18
+
16 19
 const (
17 20
 	// blockHdrSize is the size of a block header.  This is simply the constant from wire and is only provided here for convenience since wire.MaxBlockHeaderPayload is quite long.
18 21
 	blockHdrSize = wire.MaxBlockHeaderPayload
@@ -21,6 +24,7 @@ const (
21 24
 	// latestSpendJournalBucketVersion is the current version of the spend journal bucket that is used to track all spent transactions for use in reorgs.
22 25
 	latestSpendJournalBucketVersion = 1
23 26
 )
27
+
24 28
 var (
25 29
 	// blockIndexBucketName is the name of the db bucket used to house to the block headers and contextual information.
26 30
 	blockIndexBucketName = []byte("blockheaderidx")
@@ -41,35 +45,43 @@ var (
41 45
 	// byteOrder is the preferred byte order used for serializing numeric fields for storage in the database.
42 46
 	byteOrder = binary.LittleEndian
43 47
 )
48
+
44 49
 // errNotInMainChain signifies that a block hash or height that is not in the main chain was requested.
45 50
 type errNotInMainChain string
51
+
46 52
 // Error implements the error interface.
47 53
 func (e errNotInMainChain) Error() string {
48 54
 	return string(e)
49 55
 }
56
+
50 57
 // isNotInMainChainErr returns whether or not the passed error is an errNotInMainChain error.
51 58
 func isNotInMainChainErr(
52 59
 	err error) bool {
53 60
 	_, ok := err.(errNotInMainChain)
54 61
 	return ok
55 62
 }
63
+
56 64
 // errDeserialize signifies that a problem was encountered when deserializing data.
57 65
 type errDeserialize string
66
+
58 67
 func (e errDeserialize) Error() string {
59 68
 	return string(e)
60 69
 }
70
+
61 71
 // isDeserializeErr returns whether or not the passed error is an errDeserialize error.
62 72
 func isDeserializeErr(
63 73
 	err error) bool {
64 74
 	_, ok := err.(errDeserialize)
65 75
 	return ok
66 76
 }
77
+
67 78
 // isDbBucketNotFoundErr returns whether or not the passed error is a database.Error with an error code of database.ErrBucketNotFound.
68 79
 func isDbBucketNotFoundErr(
69 80
 	err error) bool {
70 81
 	dbErr, ok := err.(database.Error)
71 82
 	return ok && dbErr.ErrorCode == database.ErrBucketNotFound
72 83
 }
84
+
73 85
 // dbFetchVersion fetches an individual version with the given key from the metadata bucket.  It is primarily used to track versions on entities such as buckets.  It returns zero if the provided key does not exist.
74 86
 func dbFetchVersion(
75 87
 	dbTx database.Tx, key []byte) uint32 {
@@ -79,6 +91,7 @@ func dbFetchVersion(
79 91
 	}
80 92
 	return byteOrder.Uint32(serialized[:])
81 93
 }
94
+
82 95
 // dbPutVersion uses an existing database transaction to update the provided key in the metadata bucket to the given version.  It is primarily used to track versions on entities such as buckets.
83 96
 func dbPutVersion(
84 97
 	dbTx database.Tx, key []byte, version uint32) error {
@@ -86,6 +99,7 @@ func dbPutVersion(
86 99
 	byteOrder.PutUint32(serialized[:], version)
87 100
 	return dbTx.Metadata().Put(key, serialized[:])
88 101
 }
102
+
89 103
 // dbFetchOrCreateVersion uses an existing database transaction to attempt to fetch the provided key from the metadata bucket as a version and in the case it doesn't exist, it adds the entry with the provided default version and returns that.  This is useful during upgrades to automatically handle loading and adding version keys as necessary.
90 104
 func dbFetchOrCreateVersion(
91 105
 	dbTx database.Tx, key []byte, defaultVersion uint32) (uint32, error) {
@@ -99,6 +113,7 @@ func dbFetchOrCreateVersion(
99 113
 	}
100 114
 	return version, nil
101 115
 }
116
+
102 117
 // The transaction spend journal consists of an entry for each block connected to the main chain which contains the transaction outputs the block spends serialized such that the order is the reverse of the order they were spent. This is required because reorganizing the chain necessarily entails disconnecting blocks to get back to the point of the fork which implies unspending all of the transaction outputs that each block previously spent.
103 118
 // Since the utxo set, by definition, only contains unspent transaction outputs, the spent transaction outputs must be resurrected from somewhere.  There is more than one way this could be done, however this is the most straight forward method that does not require having a transaction index and unpruned
104 119
 // blockchain.
@@ -161,6 +176,7 @@ type SpentTxOut struct {
161 176
 	// Denotes if the creating tx is a coinbase.
162 177
 	IsCoinBase bool
163 178
 }
179
+
164 180
 // FetchSpendJournal attempts to retrieve the spend journal, or the set of outputs spent for the target block. This provides a view of all the outputs that will be consumed once the target block is connected to the end of the main chain. This function is safe for concurrent access.
165 181
 func (b *BlockChain) FetchSpendJournal(targetBlock *util.Block) ([]SpentTxOut, error) {
166 182
 	b.chainLock.RLock()
@@ -176,6 +192,7 @@ func (b *BlockChain) FetchSpendJournal(targetBlock *util.Block) ([]SpentTxOut, e
176 192
 	}
177 193
 	return spendEntries, nil
178 194
 }
195
+
179 196
 // spentTxOutHeaderCode returns the calculated header code to be used when serializing the provided stxo entry.
180 197
 func spentTxOutHeaderCode(
181 198
 	stxo *SpentTxOut) uint64 {
@@ -186,6 +203,7 @@ func spentTxOutHeaderCode(
186 203
 	}
187 204
 	return headerCode
188 205
 }
206
+
189 207
 // spentTxOutSerializeSize returns the number of bytes it would take to serialize the passed stxo according to the format described above.
190 208
 func spentTxOutSerializeSize(
191 209
 	stxo *SpentTxOut) int {
@@ -196,6 +214,7 @@ func spentTxOutSerializeSize(
196 214
 	}
197 215
 	return size + compressedTxOutSize(uint64(stxo.Amount), stxo.PkScript)
198 216
 }
217
+
199 218
 // putSpentTxOut serializes the passed stxo according to the format described above directly into the passed target byte slice.  The target byte slice must be at least large enough to handle the number of bytes returned by the SpentTxOutSerializeSize function or it will panic.
200 219
 func putSpentTxOut(
201 220
 	target []byte, stxo *SpentTxOut) int {
@@ -208,6 +227,7 @@ func putSpentTxOut(
208 227
 	return offset + putCompressedTxOut(target[offset:], uint64(stxo.Amount),
209 228
 		stxo.PkScript)
210 229
 }
230
+
211 231
 // decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed by other data, into the passed stxo struct.  It returns the number of bytes read.
212 232
 func decodeSpentTxOut(
213 233
 	serialized []byte, stxo *SpentTxOut) (int, error) {
@@ -250,6 +270,7 @@ func decodeSpentTxOut(
250 270
 	stxo.PkScript = pkScript
251 271
 	return offset, nil
252 272
 }
273
+
253 274
 // deserializeSpendJournalEntry decodes the passed serialized byte slice into a slice of spent txouts according to the format described in detail above. Since the serialization format is not self describing, as noted in the format comments, this function also requires the transactions that spend the txouts.
254 275
 func deserializeSpendJournalEntry(
255 276
 	serialized []byte, txns []*wire.MsgTx) ([]SpentTxOut, error) {
@@ -292,6 +313,7 @@ func deserializeSpendJournalEntry(
292 313
 	}
293 314
 	return stxos, nil
294 315
 }
316
+
295 317
 // serializeSpendJournalEntry serializes all of the passed spent txouts into a single byte slice according to the format described in detail above.
296 318
 func serializeSpendJournalEntry(
297 319
 	stxos []SpentTxOut) []byte {
@@ -311,6 +333,7 @@ func serializeSpendJournalEntry(
311 333
 	}
312 334
 	return serialized
313 335
 }
336
+
314 337
 // dbFetchSpendJournalEntry fetches the spend journal entry for the passed block and deserializes it into a slice of spent txout entries. NOTE: Legacy entries will not have the coinbase flag or height set unless it was the final output spend in the containing transaction.  It is up to the caller to handle this properly by looking the information up in the utxo set.
315 338
 func dbFetchSpendJournalEntry(
316 339
 	dbTx database.Tx, block *util.Block) ([]SpentTxOut, error) {
@@ -335,6 +358,7 @@ func dbFetchSpendJournalEntry(
335 358
 	}
336 359
 	return stxos, nil
337 360
 }
361
+
338 362
 // dbPutSpendJournalEntry uses an existing database transaction to update the spend journal entry for the given block hash using the provided slice of spent txouts. The spent txouts slice must contain an entry for every txout the transactions in the block spend in the order they are spent.
339 363
 func dbPutSpendJournalEntry(
340 364
 	dbTx database.Tx, blockHash *chainhash.Hash, stxos []SpentTxOut) error {
@@ -342,12 +366,14 @@ func dbPutSpendJournalEntry(
342 366
 	serialized := serializeSpendJournalEntry(stxos)
343 367
 	return spendBucket.Put(blockHash[:], serialized)
344 368
 }
369
+
345 370
 // dbRemoveSpendJournalEntry uses an existing database transaction to remove the spend journal entry for the passed block hash.
346 371
 func dbRemoveSpendJournalEntry(
347 372
 	dbTx database.Tx, blockHash *chainhash.Hash) error {
348 373
 	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
349 374
 	return spendBucket.Delete(blockHash[:])
350 375
 }
376
+
351 377
 // The unspent transaction output (utxo) set consists of an entry for each unspent output using a format that is optimized to reduce space using domain specific compression algorithms.  This format is a slightly modified version of the format used in Bitcoin Core.
352 378
 // Each entry is keyed by an outpoint as specified below.  It is important to note that the key encoding uses a VLQ, which employs an MSB encoding so iteration of utxos when doing byte-wise comparisons will produce them in order.
353 379
 // The serialized key format is:
@@ -404,6 +430,7 @@ func dbRemoveSpendJournalEntry(
404 430
 // -----------------------------------------------------------------------------
405 431
 // maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes to serialize as a VLQ.
406 432
 var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)
433
+
407 434
 // outpointKeyPool defines a concurrent safe free list of byte slices used to
408 435
 // provide temporary buffers for outpoint database keys.
409 436
 var outpointKeyPool = sync.Pool{
@@ -412,6 +439,7 @@ var outpointKeyPool = sync.Pool{
412 439
 		return &b // Pointer to slice to avoid boxing alloc.
413 440
 	},
414 441
 }
442
+
415 443
 // outpointKey returns a key suitable for use as a database key in the utxo set while making use of a free list.  A new buffer is allocated if there are not already any available on the free list.  The returned byte slice should be returned to the free list by using the recycleOutpointKey function when the caller is done with it _unless_ the slice will need to live for longer than the caller can calculate, such as when used to write to the database.
416 444
 func outpointKey(
417 445
 	outpoint wire.OutPoint) *[]byte {
@@ -423,11 +451,13 @@ func outpointKey(
423 451
 	putVLQ((*key)[chainhash.HashSize:], idx)
424 452
 	return key
425 453
 }
454
+
426 455
 // recycleOutpointKey puts the provided byte slice, which should have been obtained via the outpointKey function, back on the free list.
427 456
 func recycleOutpointKey(
428 457
 	key *[]byte) {
429 458
 	outpointKeyPool.Put(key)
430 459
 }
460
+
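Aside: a minimal standalone sketch of the free-list pattern above (illustrative names and sizes, not this package's code). A sync.Pool of *[]byte hands out reusable key buffers, and storing a pointer to the slice rather than the slice itself avoids an extra allocation that would otherwise occur when the slice header is boxed into an interface on each Put/Get.

package main

import (
	"fmt"
	"sync"
)

// keyBufSize is an assumption for illustration: a 32-byte hash plus a few
// bytes for a VLQ-encoded output index.
const keyBufSize = 37

var keyPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, keyBufSize)
		return &b // pointer to slice to avoid boxing the slice header
	},
}

func main() {
	key := keyPool.Get().(*[]byte)
	defer keyPool.Put(key) // recycle unless the buffer must outlive the caller
	copy(*key, "0123456789abcdef0123456789abcdef")
	fmt.Printf("using a %d-byte key buffer\n", len(*key))
}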
431 461
 // utxoEntryHeaderCode returns the calculated header code to be used when serializing the provided utxo entry.
432 462
 func utxoEntryHeaderCode(
433 463
 	entry *UtxoEntry) (uint64, error) {
@@ -441,6 +471,7 @@ func utxoEntryHeaderCode(
441 471
 	}
442 472
 	return headerCode, nil
443 473
 }
474
+
444 475
 // serializeUtxoEntry returns the entry serialized to a format that is suitable for long-term storage.  The format is described in detail above.
445 476
 func serializeUtxoEntry(
446 477
 	entry *UtxoEntry) ([]byte, error) {
@@ -464,6 +495,7 @@ func serializeUtxoEntry(
464 495
 		entry.PkScript())
465 496
 	return serialized, nil
466 497
 }
498
+
467 499
 // deserializeUtxoEntry decodes a utxo entry from the passed serialized byte slice into a new UtxoEntry using a format that is suitable for long-term storage.  The format is described in detail above.
468 500
 func deserializeUtxoEntry(
469 501
 	serialized []byte) (*UtxoEntry, error) {
@@ -495,6 +527,7 @@ func deserializeUtxoEntry(
495 527
 	}
496 528
 	return entry, nil
497 529
 }
530
+
498 531
 // dbFetchUtxoEntryByHash attempts to find and fetch a utxo for the given hash. It uses a cursor and seek to try and do this as efficiently as possible. When there are no entries for the provided hash, nil will be returned for both the entry and the error.
499 532
 func dbFetchUtxoEntryByHash(
500 533
 	dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) {
@@ -516,6 +549,7 @@ func dbFetchUtxoEntryByHash(
516 549
 	}
517 550
 	return deserializeUtxoEntry(cursor.Value())
518 551
 }
552
+
519 553
 // dbFetchUtxoEntry uses an existing database transaction to fetch the specified transaction output from the utxo set. When there is no entry for the provided output, nil will be returned for both the entry and the error.
520 554
 func dbFetchUtxoEntry(
521 555
 	dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, error) {
@@ -551,6 +585,7 @@ func dbFetchUtxoEntry(
551 585
 	}
552 586
 	return entry, nil
553 587
 }
588
+
554 589
 // dbPutUtxoView uses an existing database transaction to update the utxo set in the database based on the provided utxo view contents and state.  In particular, only the entries that have been marked as modified are written to the database.
555 590
 func dbPutUtxoView(
556 591
 	dbTx database.Tx, view *UtxoViewpoint) error {
@@ -584,6 +619,7 @@ func dbPutUtxoView(
584 619
 	}
585 620
 	return nil
586 621
 }
622
+
587 623
 // The block index consists of two buckets with an entry for every block in the main chain.  One bucket is for the hash to height mapping and the other is for the height to hash mapping.
588 624
 // The serialized format for values in the hash to height bucket is:
589 625
 //   <height>
@@ -610,6 +646,7 @@ func dbPutBlockIndex(
610 646
 	heightIndex := meta.Bucket(heightIndexBucketName)
611 647
 	return heightIndex.Put(serializedHeight[:], hash[:])
612 648
 }
649
+
613 650
 // dbRemoveBlockIndex uses an existing database transaction to remove block index entries from the hash to height and height to hash mappings for the provided values.
614 651
 func dbRemoveBlockIndex(
615 652
 	dbTx database.Tx, hash *chainhash.Hash, height int32) error {
@@ -625,6 +662,7 @@ func dbRemoveBlockIndex(
625 662
 	heightIndex := meta.Bucket(heightIndexBucketName)
626 663
 	return heightIndex.Delete(serializedHeight[:])
627 664
 }
665
+
628 666
 // dbFetchHeightByHash uses an existing database transaction to retrieve the height for the provided hash from the index.
629 667
 func dbFetchHeightByHash(
630 668
 	dbTx database.Tx, hash *chainhash.Hash) (int32, error) {
@@ -639,6 +677,7 @@ func dbFetchHeightByHash(
639 677
 	}
640 678
 	return int32(byteOrder.Uint32(serializedHeight)), nil
641 679
 }
680
+
642 681
 // dbFetchHashByHeight uses an existing database transaction to retrieve the hash for the provided height from the index.
643 682
 func dbFetchHashByHeight(
644 683
 	dbTx database.Tx, height int32) (*chainhash.Hash, error) {
@@ -657,6 +696,7 @@ func dbFetchHashByHeight(
657 696
 	copy(hash[:], hashBytes)
658 697
 	return &hash, nil
659 698
 }
699
+
660 700
 // The best chain state consists of the best block hash and height, the total number of transactions up to and including those in the best block, and the accumulated work sum up to and including the best block.
661 701
 // The serialized format is:
662 702
 //   <block hash><block height><total txns><work sum length><work sum>
@@ -674,6 +714,7 @@ type bestChainState struct {
674 714
 	totalTxns uint64
675 715
 	workSum   *big.Int
676 716
 }
717
+
677 718
 // serializeBestChainState returns the serialization of the passed block best chain state.  This is data to be stored in the chain state bucket.
678 719
 func serializeBestChainState(
679 720
 	state bestChainState) []byte {
@@ -694,6 +735,7 @@ func serializeBestChainState(
694 735
 	copy(serializedData[offset:], workSumBytes)
695 736
 	return serializedData[:]
696 737
 }
738
+
697 739
 // deserializeBestChainState deserializes the passed serialized best chain state.  This is data stored in the chain state bucket and is updated after every block is connected or disconnected from the main chain.
698 740
 func deserializeBestChainState(
699 741
 	serializedData []byte) (bestChainState, error) {
@@ -724,6 +766,7 @@ func deserializeBestChainState(
724 766
 	state.workSum = new(big.Int).SetBytes(workSumBytes)
725 767
 	return state, nil
726 768
 }
769
+
727 770
 // dbPutBestState uses an existing database transaction to update the best chain state with the given parameters.
728 771
 func dbPutBestState(
729 772
 	dbTx database.Tx, snapshot *BestState, workSum *big.Int) error {
@@ -737,6 +780,7 @@ func dbPutBestState(
737 780
 	// Store the current best chain state into the database.
738 781
 	return dbTx.Metadata().Put(chainStateKeyName, serializedData)
739 782
 }
783
+
740 784
 // createChainState initializes both the database and the chain state to the genesis block.  This includes creating the necessary buckets and inserting the genesis block, so it must only be called on an uninitialized database.
741 785
 func (b *BlockChain) createChainState() error {
742 786
 	// Create a new node from the genesis block and set it as the best node.
@@ -817,6 +861,7 @@ func (b *BlockChain) createChainState() error {
817 861
 	})
818 862
 	return err
819 863
 }
864
+
820 865
 // initChainState attempts to load and initialize the chain state from the database.  When the db does not yet contain any chain state, both it and the chain state are initialized to the genesis block.
821 866
 func (b *BlockChain) initChainState() error {
822 867
 	// Determine the state of the chain database. We may need to initialize everything from scratch or upgrade certain buckets.
@@ -941,6 +986,7 @@ func (b *BlockChain) initChainState() error {
941 986
 	// As we might have updated the index after it was loaded, we'll attempt to flush the index to the DB. This will only result in a write if the elements are dirty, so it'll usually be a noop.
942 987
 	return b.Index.flushToDB()
943 988
 }
989
+
944 990
 // deserializeBlockRow parses a value in the block index bucket into a block header and block status bitfield.
945 991
 func deserializeBlockRow(
946 992
 	blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
@@ -956,6 +1002,7 @@ func deserializeBlockRow(
956 1002
 	}
957 1003
 	return &header, blockStatus(statusByte), nil
958 1004
 }
1005
+
959 1006
 // dbFetchHeaderByHash uses an existing database transaction to retrieve the block header for the provided hash.
960 1007
 func dbFetchHeaderByHash(
961 1008
 	dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) {
@@ -970,6 +1017,7 @@ func dbFetchHeaderByHash(
970 1017
 	}
971 1018
 	return &header, nil
972 1019
 }
1020
+
973 1021
 // dbFetchHeaderByHeight uses an existing database transaction to retrieve the block header for the provided height.
974 1022
 func dbFetchHeaderByHeight(
975 1023
 	dbTx database.Tx, height int32) (*wire.BlockHeader, error) {
@@ -979,6 +1027,7 @@ func dbFetchHeaderByHeight(
979 1027
 	}
980 1028
 	return dbFetchHeaderByHash(dbTx, hash)
981 1029
 }
1030
+
982 1031
 // dbFetchBlockByNode uses an existing database transaction to retrieve the raw block for the provided node, deserialize it, and return a util.Block with the height set.
983 1032
 func dbFetchBlockByNode(
984 1033
 	dbTx database.Tx, node *blockNode) (*util.Block, error) {
@@ -995,6 +1044,7 @@ func dbFetchBlockByNode(
995 1044
 	block.SetHeight(node.height)
996 1045
 	return block, nil
997 1046
 }
1047
+
998 1048
 // dbStoreBlockNode stores the block header and validation status to the block index bucket. This overwrites the current entry if there exists one.
999 1049
 func dbStoreBlockNode(
1000 1050
 	dbTx database.Tx, node *blockNode) error {
@@ -1015,6 +1065,7 @@ func dbStoreBlockNode(
1015 1065
 	key := blockIndexKey(&node.hash, uint32(node.height))
1016 1066
 	return blockIndexBucket.Put(key, value)
1017 1067
 }
1068
+
1018 1069
 // dbStoreBlock stores the provided block in the database if it is not already there. The full block data is written to ffldb.
1019 1070
 func dbStoreBlock(
1020 1071
 	dbTx database.Tx, block *util.Block) error {
@@ -1027,6 +1078,7 @@ func dbStoreBlock(
1027 1078
 	}
1028 1079
 	return dbTx.StoreBlock(block)
1029 1080
 }
1081
+
1030 1082
 // blockIndexKey generates the binary key for an entry in the block index bucket. The key is composed of the block height encoded as a big-endian 32-bit unsigned int followed by the 32 byte block hash.
1031 1083
 func blockIndexKey(
1032 1084
 	blockHash *chainhash.Hash, blockHeight uint32) []byte {
@@ -1035,6 +1087,7 @@ func blockIndexKey(
1035 1087
 	copy(indexKey[4:chainhash.HashSize+4], blockHash[:])
1036 1088
 	return indexKey
1037 1089
 }
1090
+
1038 1091
 // BlockByHeight returns the block at the given height in the main chain. This function is safe for concurrent access.
1039 1092
 func (b *BlockChain) BlockByHeight(blockHeight int32) (*util.Block, error) {
1040 1093
 	// Lookup the block height in the best chain.
@@ -1052,6 +1105,7 @@ func (b *BlockChain) BlockByHeight(blockHeight int32) (*util.Block, error) {
1052 1105
 	})
1053 1106
 	return block, err
1054 1107
 }
1108
+
1055 1109
 // BlockByHash returns the block from the main chain with the given hash with the appropriate chain height set. This function is safe for concurrent access.
1056 1110
 func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*util.Block, error) {
1057 1111
 	// Lookup the block hash in block index and ensure it is in the best chain.

+ 14
- 0
pkg/chain/chainio_test.go

@@ -1,13 +1,18 @@
1
+// +build test
2
+
1 3
 package chain
4
+
2 5
 import (
3 6
 	"bytes"
4 7
 	"errors"
5 8
 	"math/big"
6 9
 	"reflect"
7 10
 	"testing"
11
+
8 12
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
9 13
 	database "git.parallelcoin.io/dev/9/pkg/db"
10 14
 )
15
+
11 16
 // TestErrNotInMainChain ensures the functions related to errNotInMainChain work as expected.
12 17
 func TestErrNotInMainChain(
13 18
 	t *testing.T) {
@@ -27,6 +32,7 @@ func TestErrNotInMainChain(
27 32
 		t.Fatalf("isNotInMainChainErr detected incorrect type")
28 33
 	}
29 34
 }
35
+
30 36
 // TestStxoSerialization ensures serializing and deserializing spent transaction output entries works as expected.
31 37
 func TestStxoSerialization(
32 38
 	t *testing.T) {
@@ -114,6 +120,7 @@ func TestStxoSerialization(
114 120
 		}
115 121
 	}
116 122
 }
123
+
117 124
 // TestStxoDecodeErrors performs negative tests against decoding spent transaction outputs to ensure error paths work as expected.
118 125
 func TestStxoDecodeErrors(
119 126
 	t *testing.T) {
@@ -180,6 +187,7 @@ func TestStxoDecodeErrors(
180 187
 		}
181 188
 	}
182 189
 }
190
+
183 191
 // TestSpendJournalSerialization ensures serializing and deserializing spend journal entries works as expected.
184 192
 func TestSpendJournalSerialization(
185 193
 	t *testing.T) {
@@ -313,6 +321,7 @@ func TestSpendJournalSerialization(
313 321
 		}
314 322
 	}
315 323
 }
324
+
316 325
 // TestSpendJournalErrors performs negative tests against deserializing spend journal entries to ensure error paths work as expected.
317 326
 func TestSpendJournalErrors(
318 327
 	t *testing.T) {
@@ -377,6 +386,7 @@ func TestSpendJournalErrors(
377 386
 		}
378 387
 	}
379 388
 }
389
+
380 390
 // TestUtxoSerialization ensures serializing and deserializing unspent transaction output entries works as expected.
381 391
 func TestUtxoSerialization(
382 392
 	t *testing.T) {
@@ -489,6 +499,7 @@ func TestUtxoSerialization(
489 499
 		}
490 500
 	}
491 501
 }
502
+
492 503
 // TestUtxoEntryHeaderCodeErrors performs negative tests against unspent transaction output header codes to ensure error paths work as expected.
493 504
 func TestUtxoEntryHeaderCodeErrors(
494 505
 	t *testing.T) {
@@ -521,6 +532,7 @@ func TestUtxoEntryHeaderCodeErrors(
521 532
 		}
522 533
 	}
523 534
 }
535
+
524 536
 // TestUtxoEntryDeserializeErrors performs negative tests against deserializing unspent transaction outputs to ensure error paths work as expected.
525 537
 func TestUtxoEntryDeserializeErrors(
526 538
 	t *testing.T) {
@@ -557,6 +569,7 @@ func TestUtxoEntryDeserializeErrors(
557 569
 		}
558 570
 	}
559 571
 }
572
+
560 573
 // TestBestChainStateSerialization ensures serializing and deserializing the best chain state works as expected.
561 574
 func TestBestChainStateSerialization(
562 575
 	t *testing.T) {
@@ -620,6 +633,7 @@ func TestBestChainStateSerialization(
620 633
 		}
621 634
 	}
622 635
 }
636
+
623 637
 // TestBestChainStateDeserializeErrors performs negative tests against deserializing the chain state to ensure error paths work as expected.
624 638
 func TestBestChainStateDeserializeErrors(
625 639
 	t *testing.T) {

+ 25
- 0
pkg/chain/chainview.go

@@ -1,11 +1,15 @@
1 1
 package chain
2
+
2 3
 import (
3 4
 	"sync"
4 5
 )
6
+
5 7
 // approxNodesPerWeek is an approximation of the number of new blocks there are in a week on average.
6 8
 const approxNodesPerWeek = 6 * 24 * 7
9
+
7 10
 // log2FloorMasks defines the masks to use when quickly calculating floor(log2(x)) in a constant log2(32) = 5 steps, where x is a uint32, using shifts.  They are derived from (2^(2^x) - 1) * (2^(2^x)), for x in 4..0.
8 11
 var log2FloorMasks = []uint32{0xffff0000, 0xff00, 0xf0, 0xc, 0x2}
12
+
9 13
 // fastLog2Floor calculates and returns floor(log2(x)) in a constant 5 steps.
10 14
 func fastLog2Floor(
11 15
 	n uint32) uint8 {
@@ -20,6 +24,7 @@ func fastLog2Floor(
20 24
 	}
21 25
 	return rv
22 26
 }
27
+
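Aside: as a quick sanity check of what fastLog2Floor computes (this is not the package's code), floor(log2(x)) for a non-zero uint32 equals bits.Len32(x) - 1 from the standard library:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, x := range []uint32{1, 2, 3, 8, 1 << 16, 1<<32 - 1} {
		// bits.Len32 returns the number of bits needed to represent x,
		// so subtracting one gives floor(log2(x)) for x >= 1.
		fmt.Printf("floor(log2(%d)) = %d\n", x, bits.Len32(x)-1)
	}
}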
23 28
 // chainView provides a flat view of a specific branch of the block chain from its tip back to the genesis block and provides various convenience functions for comparing chains.
24 29
 // For example, assume a block chain with a side chain as depicted below:
25 30
 //   genesis -> 1 -> 2 -> 3 -> 4  -> 5 ->  6  -> 7  -> 8
@@ -30,6 +35,7 @@ type chainView struct {
30 35
 	mtx   sync.Mutex
31 36
 	nodes []*blockNode
32 37
 }
38
+
33 39
 // newChainView returns a new chain view for the given tip block node. Passing nil as the tip will result in a chain view that is not initialized. The tip
34 40
 // can be updated at any time via the setTip function.
35 41
 func newChainView(
@@ -39,6 +45,7 @@ func newChainView(
39 45
 	c.setTip(tip)
40 46
 	return &c
41 47
 }
48
+
42 49
 // genesis returns the genesis block for the chain view. This only differs from the exported version in that it is up to the caller to ensure the lock is held. This function MUST be called with the view mutex locked (for reads).
43 50
 func (c *chainView) genesis() *blockNode {
44 51
 	if len(c.nodes) == 0 {
@@ -46,6 +53,7 @@ func (c *chainView) genesis() *blockNode {
46 53
 	}
47 54
 	return c.nodes[0]
48 55
 }
56
+
49 57
 // Genesis returns the genesis block for the chain view. This function is safe for concurrent access.
50 58
 func (c *chainView) Genesis() *blockNode {
51 59
 	c.mtx.Lock()
@@ -53,6 +61,7 @@ func (c *chainView) Genesis() *blockNode {
53 61
 	c.mtx.Unlock()
54 62
 	return genesis
55 63
 }
64
+
56 65
 // tip returns the current tip block node for the chain view.  It will return nil if there is no tip.  This only differs from the exported version in that it is up to the caller to ensure the lock is held. This function MUST be called with the view mutex locked (for reads).
57 66
 func (c *chainView) tip() *blockNode {
58 67
 	if len(c.nodes) == 0 {
@@ -60,6 +69,7 @@ func (c *chainView) tip() *blockNode {
60 69
 	}
61 70
 	return c.nodes[len(c.nodes)-1]
62 71
 }
72
+
63 73
 // Tip returns the current tip block node for the chain view.  It will return nil if there is no tip. This function is safe for concurrent access.
64 74
 func (c *chainView) Tip() *blockNode {
65 75
 	c.mtx.Lock()
@@ -67,6 +77,7 @@ func (c *chainView) Tip() *blockNode {
67 77
 	c.mtx.Unlock()
68 78
 	return tip
69 79
 }
80
+
70 81
 // setTip sets the chain view to use the provided block node as the current tip and ensures the view is consistent by populating it with the nodes obtained by walking backwards all the way to genesis block as necessary.  Further calls will only perform the minimum work needed, so switching between chain tips is efficient.  This only differs from the exported version in that it is up to the caller to ensure the lock is held. This function MUST be called with the view mutex locked (for writes).
71 82
 func (c *chainView) setTip(node *blockNode) {
72 83
 	if node == nil {
@@ -92,16 +103,19 @@ func (c *chainView) setTip(node *blockNode) {
92 103
 		node = node.parent
93 104
 	}
94 105
 }
106
+
95 107
 // SetTip sets the chain view to use the provided block node as the current tip and ensures the view is consistent by populating it with the nodes obtained by walking backwards all the way to genesis block as necessary.  Further calls will only perform the minimum work needed, so switching between chain tips is efficient. This function is safe for concurrent access.
96 108
 func (c *chainView) SetTip(node *blockNode) {
97 109
 	c.mtx.Lock()
98 110
 	c.setTip(node)
99 111
 	c.mtx.Unlock()
100 112
 }
113
+
101 114
 // height returns the height of the tip of the chain view.  It will return -1 if there is no tip (which only happens if the chain view has not been initialized).  This only differs from the exported version in that it is up to the caller to ensure the lock is held. This function MUST be called with the view mutex locked (for reads).
102 115
 func (c *chainView) height() int32 {
103 116
 	return int32(len(c.nodes) - 1)
104 117
 }
118
+
105 119
 // Height returns the height of the tip of the chain view.  It will return -1 if there is no tip (which only happens if the chain view has not been initialized). This function is safe for concurrent access.
106 120
 func (c *chainView) Height() int32 {
107 121
 	c.mtx.Lock()
@@ -109,6 +123,7 @@ func (c *chainView) Height() int32 {
109 123
 	c.mtx.Unlock()
110 124
 	return height
111 125
 }
126
+
112 127
 // nodeByHeight returns the block node at the specified height.  Nil will be returned if the height does not exist.  This only differs from the exported version in that it is up to the caller to ensure the lock is held. This function MUST be called with the view mutex locked (for reads).
113 128
 func (c *chainView) nodeByHeight(height int32) *blockNode {
114 129
 	if height < 0 || height >= int32(len(c.nodes)) {
@@ -116,6 +131,7 @@ func (c *chainView) nodeByHeight(height int32) *blockNode {
116 131
 	}
117 132
 	return c.nodes[height]
118 133
 }
134
+
119 135
 // NodeByHeight returns the block node at the specified height.  Nil will be returned if the height does not exist. This function is safe for concurrent access.
120 136
 func (c *chainView) NodeByHeight(height int32) *blockNode {
121 137
 	c.mtx.Lock()
@@ -123,6 +139,7 @@ func (c *chainView) NodeByHeight(height int32) *blockNode {
123 139
 	c.mtx.Unlock()
124 140
 	return node
125 141
 }
142
+
126 143
 // Equals returns whether or not two chain views are the same.  Uninitialized views (tip set to nil) are considered equal. This function is safe for concurrent access.
127 144
 func (c *chainView) Equals(other *chainView) bool {
128 145
 	c.mtx.Lock()
@@ -132,10 +149,12 @@ func (c *chainView) Equals(other *chainView) bool {
132 149
 	c.mtx.Unlock()
133 150
 	return equals
134 151
 }
152
+
135 153
 // contains returns whether or not the chain view contains the passed block node.  This only differs from the exported version in that it is up to the caller to ensure the lock is held. This function MUST be called with the view mutex locked (for reads).
136 154
 func (c *chainView) contains(node *blockNode) bool {
137 155
 	return c.nodeByHeight(node.height) == node
138 156
 }
157
+
139 158
 // Contains returns whether or not the chain view contains the passed block node. This function is safe for concurrent access.
140 159
 func (c *chainView) Contains(node *blockNode) bool {
141 160
 	c.mtx.Lock()
@@ -143,6 +162,7 @@ func (c *chainView) Contains(node *blockNode) bool {
143 162
 	c.mtx.Unlock()
144 163
 	return contains
145 164
 }
165
+
146 166
 // next returns the successor to the provided node for the chain view.  It will return nil if there is no successor or the provided node is not part of the view.  This only differs from the exported version in that it is up to the caller to ensure the lock is held. See the comment on the exported function for more details. This function MUST be called with the view mutex locked (for reads).
147 167
 func (c *chainView) next(node *blockNode) *blockNode {
148 168
 	if node == nil || !c.contains(node) {
@@ -150,6 +170,7 @@ func (c *chainView) next(node *blockNode) *blockNode {
150 170
 	}
151 171
 	return c.nodeByHeight(node.height + 1)
152 172
 }
173
+
153 174
 // Next returns the successor to the provided node for the chain view.  It will return nil if there is no successor or the provided node is not part of the view.
154 175
 // For example, assume a block chain with a side chain as depicted below:
155 176
 //   genesis -> 1 -> 2 -> 3 -> 4  -> 5 ->  6  -> 7  -> 8
@@ -164,6 +185,7 @@ func (c *chainView) Next(node *blockNode) *blockNode {
164 185
 	c.mtx.Unlock()
165 186
 	return next
166 187
 }
188
+
167 189
 // findFork returns the final common block between the provided node and the chain view.  It will return nil if there is no common block.  This only differs from the exported version in that it is up to the caller to ensure the lock is held. See the exported FindFork comments for more details. This function MUST be called with the view mutex locked (for reads).
168 190
 func (c *chainView) findFork(node *blockNode) *blockNode {
169 191
 	// No fork point for node that doesn't exist.
@@ -182,6 +204,7 @@ func (c *chainView) findFork(node *blockNode) *blockNode {
182 204
 	}
183 205
 	return node
184 206
 }
207
+
185 208
 // FindFork returns the final common block between the provided node and the chain view.  It will return nil if there is no common block.
186 209
 // For example, assume a block chain with a side chain as depicted below:
187 210
 //   genesis -> 1 -> 2 -> ... -> 5 -> 6  -> 7  -> 8
@@ -195,6 +218,7 @@ func (c *chainView) FindFork(node *blockNode) *blockNode {
195 218
 	c.mtx.Unlock()
196 219
 	return fork
197 220
 }
221
+
198 222
 // blockLocator returns a block locator for the passed block node.  The passed node can be nil in which case the block locator for the current tip associated with the view will be returned. This only differs from the exported version in that it is up to the caller to ensure the lock is held. See the exported BlockLocator function comments for more details. This function MUST be called with the view mutex locked (for reads).
199 223
 func (c *chainView) blockLocator(node *blockNode) BlockLocator {
200 224
 	// Use the current tip if requested.
@@ -239,6 +263,7 @@ func (c *chainView) blockLocator(node *blockNode) BlockLocator {
239 263
 	}
240 264
 	return locator
241 265
 }
266
+
242 267
 // BlockLocator returns a block locator for the passed block node.  The passed node can be nil in which case the block locator for the current tip associated with the view will be returned. See the BlockLocator type for details on the algorithm used to create a block locator. This function is safe for concurrent access.
243 268
 func (c *chainView) BlockLocator(node *blockNode) BlockLocator {
244 269
 	c.mtx.Lock()

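Aside: a hedged, self-contained sketch of the block-locator layout referenced above (assumed parameters, not this package's algorithm): recent heights are listed densely and then the step size doubles back toward the genesis block, so a fork point can be located with only O(log n) hashes.

package main

import "fmt"

// locatorHeights lists the heights a locator built from the given tip might
// include; starting to double the step after roughly a dozen entries is an
// assumption for illustration.
func locatorHeights(tip int32) []int32 {
	heights := []int32{}
	step := int32(1)
	for h := tip; h > 0; h -= step {
		heights = append(heights, h)
		if len(heights) > 12 {
			step *= 2
		}
	}
	// The locator always terminates with the genesis block.
	return append(heights, 0)
}

func main() {
	fmt.Println(locatorHeights(100000))
}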
+ 35
- 21
pkg/chain/chainview_test.go

@@ -1,13 +1,19 @@
1
+// +build test
2
+
1 3
 package chain
4
+
2 5
 import (
3 6
 	"fmt"
4 7
 	"math/rand"
5 8
 	"reflect"
6 9
 	"testing"
10
+
7 11
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
8 12
 )
13
+
9 14
 // testNoncePrng provides a deterministic prng for the nonce in generated fake nodes.  This ensures that the nodes have unique hashes.
10 15
 var testNoncePrng = rand.New(rand.NewSource(0))
16
+
11 17
 // chainedNodes returns the specified number of nodes constructed such that each subsequent node points to the previous one to create a chain.  The first node will point to the passed parent which can be nil if desired.
12 18
 func chainedNodes(
13 19
 	parent *blockNode, numNodes int) []*blockNode {
@@ -24,15 +30,18 @@ func chainedNodes(
24 30
 	}
25 31
 	return nodes
26 32
 }
33
+
27 34
 // String returns the block node as a human-readable name.
28 35
 func (node blockNode) String() string {
29 36
 	return fmt.Sprintf("%s(%d)", node.hash, node.height)
30 37
 }
38
+
31 39
 // tstTip is a convenience function to grab the tip of a chain of block nodes created via chainedNodes.
32 40
 func tstTip(
33 41
 	nodes []*blockNode) *blockNode {
34 42
 	return nodes[len(nodes)-1]
35 43
 }
44
+
36 45
 // locatorHashes is a convenience function that returns the hashes for all of the passed indexes of the provided nodes.  It is used to construct expected block locators in the tests.
37 46
 func locatorHashes(
38 47
 	nodes []*blockNode, indexes ...int) BlockLocator {
@@ -42,6 +51,7 @@ func locatorHashes(
42 51
 	}
43 52
 	return hashes
44 53
 }
54
+
45 55
 // zipLocators is a convenience function that returns a single block locator given a variable number of them and is used in the tests.
46 56
 func zipLocators(
47 57
 	locators ...BlockLocator) BlockLocator {
@@ -51,6 +61,7 @@ func zipLocators(
51 61
 	}
52 62
 	return hashes
53 63
 }
64
+
54 65
 // TestChainView ensures all of the exported functionality of chain views works as intended with the exception of some special cases which are handled in other tests.
55 66
 func TestChainView(
56 67
 	t *testing.T) {
@@ -89,13 +100,13 @@ func TestChainView(
89 100
 	}{
90 101
 		{
91 102
 			// Create a view for branch 0 as the active chain and another view for branch 1 as the side chain.
92
-			name:    "chain0-chain1",
93
-			view:    newChainView(tip(branch0Nodes)),
94
-			genesis: branch0Nodes[0],
95
-			tip:     tip(branch0Nodes),
96
-			side:    newChainView(tip(branch1Nodes)),
97
-			sideTip: tip(branch1Nodes),
98
-			fork: branch0Nodes[1],
103
+			name:       "chain0-chain1",
104
+			view:       newChainView(tip(branch0Nodes)),
105
+			genesis:    branch0Nodes[0],
106
+			tip:        tip(branch0Nodes),
107
+			side:       newChainView(tip(branch1Nodes)),
108
+			sideTip:    tip(branch1Nodes),
109
+			fork:       branch0Nodes[1],
99 110
 			contains:   branch0Nodes,
100 111
 			noContains: branch1Nodes,
101 112
 			equal:      newChainView(tip(branch0Nodes)),
@@ -104,13 +115,13 @@ func TestChainView(
104 115
 		},
105 116
 		{
106 117
 			// Create a view for branch 1 as the active chain and another view for branch 2 as the side chain.
107
-			name:    "chain1-chain2",
108
-			view:    newChainView(tip(branch1Nodes)),
109
-			genesis: branch0Nodes[0],
110
-			tip:     tip(branch1Nodes),
111
-			side:    newChainView(tip(branch2Nodes)),
112
-			sideTip: tip(branch2Nodes),
113
-			fork: branch1Nodes[0],
118
+			name:       "chain1-chain2",
119
+			view:       newChainView(tip(branch1Nodes)),
120
+			genesis:    branch0Nodes[0],
121
+			tip:        tip(branch1Nodes),
122
+			side:       newChainView(tip(branch2Nodes)),
123
+			sideTip:    tip(branch2Nodes),
124
+			fork:       branch1Nodes[0],
114 125
 			contains:   branch1Nodes,
115 126
 			noContains: branch2Nodes,
116 127
 			equal:      newChainView(tip(branch1Nodes)),
@@ -122,13 +133,13 @@ func TestChainView(
122 133
 		},
123 134
 		{
124 135
 			// Create a view for branch 2 as the active chain and another view for branch 0 as the side chain.
125
-			name:    "chain2-chain0",
126
-			view:    newChainView(tip(branch2Nodes)),
127
-			genesis: branch0Nodes[0],
128
-			tip:     tip(branch2Nodes),
129
-			side:    newChainView(tip(branch0Nodes)),
130
-			sideTip: tip(branch0Nodes),
131
-			fork: branch0Nodes[1],
136
+			name:       "chain2-chain0",
137
+			view:       newChainView(tip(branch2Nodes)),
138
+			genesis:    branch0Nodes[0],
139
+			tip:        tip(branch2Nodes),
140
+			side:       newChainView(tip(branch0Nodes)),
141
+			sideTip:    tip(branch0Nodes),
142
+			fork:       branch0Nodes[1],
132 143
 			contains:   branch2Nodes,
133 144
 			noContains: branch0Nodes[2:],
134 145
 			equal:      newChainView(tip(branch2Nodes)),
@@ -265,6 +276,7 @@ testLoop:
265 276
 		}
266 277
 	}
267 278
 }
279
+
268 280
 // TestChainViewForkCorners ensures that finding the fork between two chains works in some corner cases such as when the two chains have completely unrelated histories.
269 281
 func TestChainViewForkCorners(
270 282
 	t *testing.T) {
@@ -292,6 +304,7 @@ func TestChainViewForkCorners(
292 304
 		}
293 305
 	}
294 306
 }
307
+
295 308
 // TestChainViewSetTip ensures changing the tip works as intended including capacity changes.
296 309
 func TestChainViewSetTip(
297 310
 	t *testing.T) {
@@ -358,6 +371,7 @@ testLoop:
358 371
 		}
359 372
 	}
360 373
 }
374
+
361 375
 // TestChainViewNil ensures that creating and accessing a nil chain view behaves as expected.
362 376
 func TestChainViewNil(
363 377
 	t *testing.T) {

+ 11
- 0
pkg/chain/checkpoints.go

@@ -1,30 +1,37 @@
1 1
 package chain
2
+
2 3
 import (
3 4
 	"fmt"
4 5
 	"time"
6
+
5 7
 	chaincfg "git.parallelcoin.io/dev/9/pkg/chain/config"
6 8
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
7 9
 	txscript "git.parallelcoin.io/dev/9/pkg/chain/tx/script"
8 10
 	"git.parallelcoin.io/dev/9/pkg/util"
9 11
 	cl "git.parallelcoin.io/dev/9/pkg/util/cl"
10 12
 )
13
+
11 14
 // CheckpointConfirmations is the number of blocks before the end of the current best block chain that a good checkpoint candidate must be.
12 15
 // TODO: review this and add it to the fork spec
13 16
 const CheckpointConfirmations = 2016
17
+
14 18
 // newHashFromStr converts the passed big-endian hex string into a chainhash.Hash.  It only differs from the one available in chainhash in that it ignores the error since it will only (and must only) be called with hard-coded, and therefore known good, hashes.
15 19
 func newHashFromStr(
16 20
 	hexStr string) *chainhash.Hash {
17 21
 	hash, _ := chainhash.NewHashFromStr(hexStr)
18 22
 	return hash
19 23
 }
24
+
20 25
 // Checkpoints returns a slice of checkpoints (regardless of whether they are already known).  When there are no checkpoints for the chain, it will return nil. This function is safe for concurrent access.
21 26
 func (b *BlockChain) Checkpoints() []chaincfg.Checkpoint {
22 27
 	return b.checkpoints
23 28
 }
29
+
24 30
 // HasCheckpoints returns whether this BlockChain has checkpoints defined. This function is safe for concurrent access.
25 31
 func (b *BlockChain) HasCheckpoints() bool {
26 32
 	return len(b.checkpoints) > 0
27 33
 }
34
+
28 35
 // LatestCheckpoint returns the most recent checkpoint (regardless of whether it is already known). When there are no defined checkpoints for the active chain instance, it will return nil. This function is safe for concurrent access.
29 36
 func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint {
30 37
 	if !b.HasCheckpoints() {
@@ -32,6 +39,7 @@ func (b *BlockChain) LatestCheckpoint() *chaincfg.Checkpoint {
32 39
 	}
33 40
 	return &b.checkpoints[len(b.checkpoints)-1]
34 41
 }
42
+
35 43
 // verifyCheckpoint returns whether the passed block height and hash combination match the checkpoint data.  It also returns true if there is no checkpoint data for the passed block height.
36 44
 func (b *BlockChain) verifyCheckpoint(height int32, hash *chainhash.Hash) bool {
37 45
 	if !b.HasCheckpoints() {
@@ -52,6 +60,7 @@ func (b *BlockChain) verifyCheckpoint(height int32, hash *chainhash.Hash) bool {
52 60
 	}
53 61
 	return true
54 62
 }
63
+
55 64
 // findPreviousCheckpoint finds the most recent checkpoint that is already available in the downloaded portion of the block chain and returns the associated block node.  It returns nil if a checkpoint can't be found (this should really only happen for blocks before the first checkpoint). This function MUST be called with the chain lock held (for reads).
56 65
 func (b *BlockChain) findPreviousCheckpoint() (*blockNode, error) {
57 66
 	if !b.HasCheckpoints() {
@@ -108,6 +117,7 @@ func (b *BlockChain) findPreviousCheckpoint() (*blockNode, error) {
108 117
 	}
109 118
 	return b.checkpointNode, nil
110 119
 }
120
+
111 121
 // isNonstandardTransaction determines whether a transaction contains any scripts which are not one of the standard types.
112 122
 func isNonstandardTransaction(
113 123
 	tx *util.Tx) bool {
@@ -120,6 +130,7 @@ func isNonstandardTransaction(
120 130
 	}
121 131
 	return false
122 132
 }
133
+
123 134
 // IsCheckpointCandidate returns whether or not the passed block is a good checkpoint candidate.
124 135
 // The factors used to determine a good checkpoint are:
125 136
 //  - The block must be in the main chain

+ 14
- 0
pkg/chain/common_test.go

@@ -1,4 +1,7 @@
1
+// +build test
2
+
1 3
 package chain
4
+
2 5
 import (
3 6
 	"compress/bzip2"
4 7
 	"encoding/binary"
@@ -8,6 +11,7 @@ import (
8 11
 	"path/filepath"
9 12
 	"strings"
10 13
 	"time"
14
+
11 15
 	chaincfg "git.parallelcoin.io/dev/9/pkg/chain/config"
12 16
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
13 17
 	txscript "git.parallelcoin.io/dev/9/pkg/chain/tx/script"
@@ -16,6 +20,7 @@ import (
16 20
 	_ "git.parallelcoin.io/dev/9/pkg/db/ffldb"
17 21
 	"git.parallelcoin.io/dev/9/pkg/util"
18 22
 )
23
+
19 24
 const (
20 25
 	// testDbType is the database backend type to use for the tests.
21 26
 	testDbType = "ffldb"
@@ -24,6 +29,7 @@ const (
24 29
 	// blockDataNet is the expected network in the test block data.
25 30
 	blockDataNet = wire.MainNet
26 31
 )
32
+
27 33
 // fileExists returns whether or not the named file or directory exists.
28 34
 func fileExists(
29 35
 	name string) bool {
@@ -34,6 +40,7 @@ func fileExists(
34 40
 	}
35 41
 	return true
36 42
 }
43
+
37 44
 // isSupportedDbType returns whether or not the passed database type is currently supported.
38 45
 func isSupportedDbType(
39 46
 	dbType string) bool {
@@ -45,6 +52,7 @@ func isSupportedDbType(
45 52
 	}
46 53
 	return false
47 54
 }
55
+
48 56
 // loadBlocks reads files containing bitcoin block data (gzipped but otherwise in the format bitcoind writes) from disk and returns them as an array of util.Block.  This is largely borrowed from the test code in podb.
49 57
 func loadBlocks(
50 58
 	filename string) (blocks []*util.Block, err error) {
@@ -92,6 +100,7 @@ func loadBlocks(
92 100
 	}
93 101
 	return
94 102
 }
103
+
95 104
 // chainSetup is used to create a new db and chain instance with the genesis block already inserted.  In addition to the new chain instance, it returns a teardown function the caller should invoke when done testing to clean up.
96 105
 func chainSetup(
97 106
 	dbName string, params *chaincfg.Params) (*BlockChain, func(), error) {
@@ -152,6 +161,7 @@ func chainSetup(
152 161
 	}
153 162
 	return chain, teardown, nil
154 163
 }
164
+
155 165
 // loadUtxoView returns a utxo view loaded from a file.
156 166
 func loadUtxoView(
157 167
 	filename string) (*UtxoViewpoint, error) {
@@ -211,6 +221,7 @@ func loadUtxoView(
211 221
 	}
212 222
 	return view, nil
213 223
 }
224
+
214 225
 // convertUtxoStore reads a utxostore from the legacy format and writes it back out using the latest format.  It is only useful for converting utxostore data used in the tests, which has already been done.  However, the code is left available for future reference.
215 226
 func convertUtxoStore(
216 227
 	r io.Reader, w io.Writer) error {
@@ -278,10 +289,12 @@ func convertUtxoStore(
278 289
 	}
279 290
 	return nil
280 291
 }
292
+
281 293
 // TstSetCoinbaseMaturity makes the ability to set the coinbase maturity available when running tests.
282 294
 func (b *BlockChain) TstSetCoinbaseMaturity(maturity uint16) {
283 295
 	b.chainParams.CoinbaseMaturity = maturity
284 296
 }
297
+
285 298
 // newFakeChain returns a chain that is usable for synthetic tests.  It is important to note that this chain has no database associated with it, so it is not usable with all functions and the tests must take care when making use of it.
286 299
 func newFakeChain(
287 300
 	params *chaincfg.Params) *BlockChain {
@@ -304,6 +317,7 @@ func newFakeChain(
304 317
 		deploymentCaches:    newThresholdCaches(chaincfg.DefinedDeployments),
305 318
 	}
306 319
 }
320
+
307 321
 // newFakeNode creates a block node connected to the passed parent with the provided fields populated and fake values for the other fields.
308 322
 func newFakeNode(
309 323
 	parent *blockNode, blockVersion int32, bits uint32, timestamp time.Time) *blockNode {

+ 17
- 0
pkg/chain/compress.go

@@ -1,8 +1,10 @@
1 1
 package chain
2
+
2 3
 import (
3 4
 	txscript "git.parallelcoin.io/dev/9/pkg/chain/tx/script"
4 5
 	ec "git.parallelcoin.io/dev/9/pkg/util/elliptic"
5 6
 )
7
+
6 8
 // In order to reduce the size of stored scripts, a domain specific compression algorithm is used which recognizes standard scripts and stores them using fewer bytes than the original script.  The compression algorithm used here was obtained from Bitcoin Core, so all credits for the algorithm go to it.
7 9
 // The general serialized format is:
8 10
 //   <script size or type><script data>
@@ -34,6 +36,7 @@ const (
34 36
 	// numSpecialScripts is the number of special scripts recognized by the domain-specific script compression algorithm.
35 37
 	numSpecialScripts = 6
36 38
 )
39
+
37 40
 // In order to reduce the size of stored amounts, a domain specific compression algorithm is used which relies on there typically being a lot of zeroes at the end of the amounts.  The compression algorithm used here was obtained from Bitcoin Core, so all credits for the algorithm go to it. While this is simply exchanging one uint64 for another, the resulting value for typical amounts has a much smaller magnitude which results in fewer bytes when encoded as variable length quantity.  For example, consider the amount of 0.1 DUO which is 10000000 satoshi.  Encoding 10000000 as a VLQ would take 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte. Essentially the compression is achieved by splitting the value into an exponent in the range [0-9] and a digit in the range [1-9], when possible, and encoding them in a way that can be decoded.  More specifically, the encoding is as follows:
38 41
 // - 0 is 0
39 42
 // - Find the exponent, e, as the largest power of 10 that evenly divides the
@@ -79,6 +82,7 @@ func compressTxOutAmount(
79 82
 	// 1 + 10*(n-1) + e   ==   10 + 10*(n-1)
80 83
 	return 10 + 10*(amount-1)
81 84
 }
85
+
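Aside: a minimal sketch of the amount-compression arithmetic described above (illustrative only, not the package's code), assuming the 1 + 10*(9*n + d - 1) + e encoding for exponents below nine. For 0.1 DUO = 10,000,000 satoshi the exponent is 7 and the leading digit is 1, giving 1 + 10*0 + 7 = 8, matching the example in the comment.

package main

import "fmt"

func compressAmount(amount uint64) uint64 {
	if amount == 0 {
		return 0
	}
	// Strip trailing zeroes (at most nine) to find the exponent e.
	e := uint64(0)
	for amount%10 == 0 && e < 9 {
		amount /= 10
		e++
	}
	if e < 9 {
		d := amount % 10 // last non-zero digit, in [1, 9]
		n := amount / 10 // the remaining leading digits
		return 1 + 10*(9*n+d-1) + e
	}
	return 10 + 10*(amount-1)
}

func main() {
	fmt.Println(compressAmount(10000000)) // 0.1 DUO in satoshi -> 8
}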
82 86
 // compressedScriptSize returns the number of bytes the passed script would take when encoded with the domain specific compression algorithm described above.
83 87
 func compressedScriptSize(
84 88
 	pkScript []byte) int {
@@ -98,6 +102,7 @@ func compressedScriptSize(
98 102
 	return serializeSizeVLQ(uint64(len(pkScript)+numSpecialScripts)) +
99 103
 		len(pkScript)
100 104
 }
105
+
101 106
 // Compressed transaction outputs consist of an amount and a public key script both compressed using the domain specific compression algorithms previously
102 107
 // described.
103 108
 // The serialized format is:
@@ -111,6 +116,7 @@ func compressedTxOutSize(
111 116
 	return serializeSizeVLQ(compressTxOutAmount(amount)) +
112 117
 		compressedScriptSize(pkScript)
113 118
 }
119
+
114 120
 // decodeCompressedScriptSize treats the passed serialized bytes as a compressed script, possibly followed by other data, and returns the number of bytes it occupies taking into account the special encoding of the script size by the domain specific compression algorithm described above.
115 121
 func decodeCompressedScriptSize(
116 122
 	serialized []byte) int {
@@ -131,6 +137,7 @@ func decodeCompressedScriptSize(
131 137
 	scriptSize += uint64(bytesRead)
132 138
 	return int(scriptSize)
133 139
 }
140
+
134 141
 // decodeCompressedTxOut decodes the passed compressed txout, possibly followed by other data, into its uncompressed amount and script and returns them along with the number of bytes they occupied prior to decompression.
135 142
 func decodeCompressedTxOut(
136 143
 	serialized []byte) (uint64, []byte, int, error) {
@@ -151,6 +158,7 @@ func decodeCompressedTxOut(
151 158
 	script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
152 159
 	return amount, script, bytesRead + scriptSize, nil
153 160
 }
161
+
154 162
 // decompressScript returns the original script obtained by decompressing the passed compressed script according to the domain specific compression algorithm described above. NOTE: The script parameter must already have been proven to be long enough to contain the number of bytes returned by decodeCompressedScriptSize or it will panic.  This is acceptable since it is only an internal function.
155 163
 func decompressScript(
156 164
 	compressedPkScript []byte) []byte {
@@ -213,6 +221,7 @@ func decompressScript(
213 221
 	copy(pkScript, compressedPkScript[bytesRead:bytesRead+scriptSize])
214 222
 	return pkScript
215 223
 }
224
+
216 225
 // decompressTxOutAmount returns the original amount the passed compressed amount represents according to the domain specific compression algorithm described above.
217 226
 func decompressTxOutAmount(
218 227
 	amount uint64) uint64 {
@@ -246,6 +255,7 @@ func decompressTxOutAmount(
246 255
 	}
247 256
 	return n
248 257
 }
258
+
249 259
 // deserializeVLQ deserializes the provided variable-length quantity according to the format described above.  It also returns the number of bytes deserialized.
250 260
 func deserializeVLQ(
251 261
 	serialized []byte) (uint64, int) {
@@ -261,6 +271,7 @@ func deserializeVLQ(
261 271
 	}
262 272
 	return n, size
263 273
 }
274
+
264 275
 // isPubKey returns whether or not the passed public key script is a standard pay-to-pubkey script that pays to a valid compressed or uncompressed public key along with the serialized pubkey it is paying to if it is. NOTE: This function ensures the public key is actually valid since the compression algorithm requires valid pubkeys.  It does not support hybrid pubkeys.  This means that even if the script has the correct form for a pay-to-pubkey script, this function will only return true when it is paying to a valid compressed or uncompressed pubkey.
265 276
 func isPubKey(
266 277
 	script []byte) (bool, []byte) {
@@ -287,6 +298,7 @@ func isPubKey(
287 298
 	}
288 299
 	return false, nil
289 300
 }
301
+
290 302
 // isPubKeyHash returns whether or not the passed public key script is a standard pay-to-pubkey-hash script along with the pubkey hash it is paying to if it is.
291 303
 func isPubKeyHash(
292 304
 	script []byte) (bool, []byte) {
@@ -299,6 +311,7 @@ func isPubKeyHash(
299 311
 	}
300 312
 	return false, nil
301 313
 }
314
+
302 315
 // isScriptHash returns whether or not the passed public key script is a standard pay-to-script-hash script along with the script hash it is paying to if it is.
303 316
 func isScriptHash(
304 317
 	script []byte) (bool, []byte) {
@@ -309,6 +322,7 @@ func isScriptHash(
309 322
 	}
310 323
 	return false, nil
311 324
 }
325
+
312 326
 // putCompressedScript compresses the passed script according to the domain specific compression algorithm described above directly into the passed target byte slice.  The target byte slice must be at least large enough to handle the number of bytes returned by the compressedScriptSize function or it will panic.
313 327
 func putCompressedScript(
314 328
 	target, pkScript []byte) int {
@@ -346,6 +360,7 @@ func putCompressedScript(
346 360
 	copy(target[vlqSizeLen:], pkScript)
347 361
 	return vlqSizeLen + len(pkScript)
348 362
 }
363
+
349 364
 // putCompressedTxOut compresses the passed amount and script according to their domain specific compression algorithms and encodes them directly into the passed target byte slice with the format described above.  The target byte slice must be at least large enough to handle the number of bytes returned by the compressedTxOutSize function or it will panic.
350 365
 func putCompressedTxOut(
351 366
 	target []byte, amount uint64, pkScript []byte) int {
@@ -353,6 +368,7 @@ func putCompressedTxOut(
353 368
 	offset += putCompressedScript(target[offset:], pkScript)
354 369
 	return offset
355 370
 }
371
+
356 372
 // putVLQ serializes the provided number to a variable-length quantity according to the format described above and returns the number of bytes of the encoded value.  The result is placed directly into the passed byte slice which must be at least large enough to handle the number of bytes returned by the serializeSizeVLQ function or it will panic.
357 373
 func putVLQ(
358 374
 	target []byte, n uint64) int {
@@ -375,6 +391,7 @@ func putVLQ(
375 391
 	}
376 392
 	return offset + 1
377 393
 }
394
+
378 395
 // A variable length quantity (VLQ) is an encoding that uses an arbitrary number of binary octets to represent an arbitrarily large integer.  The scheme employs a most significant byte (MSB) base-128 encoding where the high bit in each byte indicates whether or not the byte is the final one.  In addition, to ensure there are no redundant encodings, an offset is subtracted every time a group of 7 bits is shifted out.  Therefore each integer can be represented in exactly one way, and each representation stands for exactly one integer.
379 396
 // Another nice property of this encoding is that it provides a compact representation of values that are typically used to indicate sizes.  For example, the values 0 - 127 are represented with a single byte, 128 - 16511
380 397
 // with two bytes, and 16512 - 2113663 with three bytes.
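Aside: a minimal sketch of the VLQ variant described above (MSB base-128 with one subtracted per continuation byte); this is an illustration, not the package's putVLQ/deserializeVLQ, but it reproduces the same byte-count boundaries:

package main

import "fmt"

func encodeVLQ(n uint64) []byte {
	// The least significant 7-bit group is written without the high bit set.
	out := []byte{byte(n & 0x7f)}
	for n > 0x7f {
		// Subtract one per extra byte so every value has exactly one encoding.
		n = (n >> 7) - 1
		out = append(out, byte(n&0x7f)|0x80)
	}
	// Reverse so the most significant group comes first (MSB encoding).
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}

func main() {
	for _, n := range []uint64{0, 127, 128, 16511, 16512, 2113663} {
		fmt.Printf("%7d -> %x\n", n, encodeVLQ(n))
	}
}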

+ 10
- 0
pkg/chain/compress_test.go

@@ -1,9 +1,13 @@
1
+// +build test
2
+
1 3
 package chain
4
+
2 5
 import (
3 6
 	"bytes"
4 7
 	"encoding/hex"
5 8
 	"testing"
6 9
 )
10
+
7 11
 // hexToBytes converts the passed hex string into bytes and will panic if there is an error.  This is only provided for the hard-coded constants so errors in the source code can be detected. It will only (and must only) be called with hard-coded values.
8 12
 func hexToBytes(
9 13
 	s string) []byte {
@@ -13,6 +17,7 @@ func hexToBytes(
13 17
 	}
14 18
 	return b
15 19
 }
20
+
16 21
 // TestVLQ ensures the variable length quantity serialization, deserialization, and size calculation works as expected.
17 22
 func TestVLQ(
18 23
 	t *testing.T) {
@@ -86,6 +91,7 @@ func TestVLQ(
86 91
 		}
87 92
 	}
88 93
 }
94
+
89 95
 // TestScriptCompression ensures the domain-specific script compression and decompression works as expected.
90 96
 func TestScriptCompression(
91 97
 	t *testing.T) {
@@ -202,6 +208,7 @@ func TestScriptCompression(
202 208
 		}
203 209
 	}
204 210
 }
211
+
205 212
 // TestScriptCompressionErrors ensures calling various functions related to script compression with incorrect data returns the expected results.
206 213
 func TestScriptCompressionErrors(
207 214
 	t *testing.T) {
@@ -225,6 +232,7 @@ func TestScriptCompressionErrors(
225 232
 			"nil decompressed script - got %x", gotScript)
226 233
 	}
227 234
 }
235
+
228 236
 // TestAmountCompression ensures the domain-specific transaction output amount compression and decompression works as expected.
229 237
 func TestAmountCompression(
230 238
 	t *testing.T) {
@@ -299,6 +307,7 @@ func TestAmountCompression(
299 307
 		}
300 308
 	}
301 309
 }
310
+
302 311
 // TestCompressedTxOut ensures the transaction output serialization and deserialization works as expected.
303 312
 func TestCompressedTxOut(
304 313
 	t *testing.T) {
@@ -382,6 +391,7 @@ func TestCompressedTxOut(
382 391
 		}
383 392
 	}
384 393
 }
394
+
385 395
 // TestTxOutCompressionErrors ensures calling various functions related to txout compression with incorrect data returns the expected results.
386 396
 func TestTxOutCompressionErrors(
387 397
 	t *testing.T) {

+ 5
- 0
pkg/chain/config/doc.go

@@ -1,7 +1,11 @@
1 1
 // Package chaincfg defines chain configuration parameters.
2
+//
2 3
 // In addition to the main Bitcoin network, which is intended for the transfer of monetary value, there also exist two currently active standard networks: regression test and testnet (version 3).  These networks are incompatible with each other (each sharing a different genesis block) and software should handle errors where input intended for one network is used on an application instance running on a different network. For library packages, chaincfg provides the ability to look up chain parameters and encoding magics when passed a *Params.
4
+//
3 5
 // Older APIs not updated to the new convention of passing a *Params may look up the parameters for a wire.BitcoinNet using ParamsForNet, but be aware that this usage is deprecated and will be removed from chaincfg in the future. For main packages, a (typically global) var may be assigned the address of one of the standard Param vars for use as the application's "active" network.
6
+//
4 7
 // When a network parameter is needed, it may then be looked up through this variable (either directly, or hidden in a library call).
8
+//
5 9
 //  package main
6 10
 //  import (
7 11
 //          "flag"
@@ -28,5 +32,6 @@
28 32
 //          }
29 33
 //          fmt.Println(addr)
30 34
 //  }
35
+//
31 36
 // If an application does not use one of the three standard Bitcoin networks, a new Params struct may be created which defines the parameters for the non-standard network.  As a general rule of thumb, all network parameters should be unique to the network, but parameter collisions can still occur (unfortunately, this is the case with regtest and testnet3 sharing magics).
32 37
 package chaincfg

+ 15
- 0
pkg/chain/config/genesis.go

@@ -1,10 +1,13 @@
1 1
 package chaincfg
2
+
2 3
 import (
3 4
 	"time"
5
+
4 6
 	"git.parallelcoin.io/dev/9/pkg/chain/fork"
5 7
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
6 8
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
7 9
 )
10
+
8 11
 // genesisCoinbaseTx is the coinbase transaction for the genesis blocks for the main network, regression test network, and test network (version 3).
9 12
 var genesisCoinbaseTx = wire.MsgTx{
10 13
 	Version: 2,
@@ -45,6 +48,7 @@ var genesisCoinbaseTx = wire.MsgTx{
45 48
 	},
46 49
 	LockTime: 0,
47 50
 }
51
+
48 52
 // genesisHash is the hash of the first block in the block chain for the main network (genesis block).
49 53
 var genesisHash = chainhash.Hash([chainhash.HashSize]byte{
50 54
 	0xc7, 0xcc, 0x40, 0xc7, 0xc5, 0x4f, 0xd1, 0x39,
@@ -56,6 +60,7 @@ var genesisHash = chainhash.Hash([chainhash.HashSize]byte{
56 60
 	// 0x8b, 0xf2, 0x98, 0xcf, 0xe7, 0x3a, 0xdf, 0x1d,
57 61
 	// 0x39, 0xd1, 0x4f, 0xc5, 0xc7, 0x40, 0xcc, 0xc7,
58 62
 })
63
+
59 64
 // genesisMerkleRoot is the hash of the first transaction in the genesis block for the main network.
60 65
 var genesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{
61 66
 	// 0xc8, 0x43, 0xea, 0xe4, 0x65, 0x8e, 0x3a, 0x51,
@@ -67,6 +72,7 @@ var genesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{
67 72
 	0x56, 0xce, 0x76, 0x63, 0xc3, 0x80, 0xf2, 0xd2,
68 73
 	0x51, 0x3a, 0x8e, 0x65, 0xe4, 0xea, 0x43, 0xc8,
69 74
 })
75
+
70 76
 // genesisBlock defines the genesis block of the block chain which serves as the public transaction ledger for the main network.
71 77
 var genesisBlock = wire.MsgBlock{
72 78
 	Header: wire.BlockHeader{
@@ -79,6 +85,7 @@ var genesisBlock = wire.MsgBlock{
79 85
 	},
80 86
 	Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
81 87
 }
88
+
82 89
 // regTestGenesisHash is the hash of the first block in the block chain for the regression test network (genesis block).
83 90
 var regTestGenesisHash = chainhash.Hash([chainhash.HashSize]byte{
84 91
 	0x81, 0x91, 0x37, 0x60, 0xab, 0x59, 0x85, 0x57,
@@ -86,8 +93,10 @@ var regTestGenesisHash = chainhash.Hash([chainhash.HashSize]byte{
86 93
 	0x48, 0x7e, 0x66, 0x25, 0xc8, 0x52, 0x2a, 0xdc,
87 94
 	0x83, 0xa1, 0x0e, 0x22, 0x9e, 0xb7, 0xe9, 0x69,
88 95
 })
96
+
89 97
 // regTestGenesisMerkleRoot is the hash of the first transaction in the genesis block for the regression test network.  It is the same as the merkle root for the main network.
90 98
 var regTestGenesisMerkleRoot = genesisMerkleRoot
99
+
91 100
 // regTestGenesisBlock defines the genesis block of the block chain which serves as the public transaction ledger for the regression test network.
92 101
 var regTestGenesisBlock = wire.MsgBlock{
93 102
 	Header: wire.BlockHeader{
@@ -100,10 +109,12 @@ var regTestGenesisBlock = wire.MsgBlock{
100 109
 	},
101 110
 	Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
102 111
 }
112
+
103 113
 // testNet3GenesisMerkleRoot is the hash of the first transaction in the genesis
104 114
 // block for the test network (version 3).  It is the same as the merkle root
105 115
 // for the main network.
106 116
 var testNet3GenesisMerkleRoot = genesisMerkleRoot
117
+
107 118
 // testNet3GenesisBlock defines the genesis block of the block chain which
108 119
 // serves as the public transaction ledger for the test network (version 3).
109 120
 var testNet3GenesisBlock = wire.MsgBlock{
@@ -117,6 +128,7 @@ var testNet3GenesisBlock = wire.MsgBlock{
117 128
 	},
118 129
 	Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
119 130
 }
131
+
120 132
 // testNet3GenesisHash is the hash of the first block in the block chain for the test network (version 3).
121 133
 // var testNet3GenesisHash = chainhash.Hash([chainhash.HashSize]byte{
122 134
 // 	0xdf, 0x0c, 0xb3, 0x5f, 0x69, 0x72, 0x75, 0xe1,
@@ -125,6 +137,7 @@ var testNet3GenesisBlock = wire.MsgBlock{
125 137
 // 	0x5e, 0xa3, 0xba, 0xec, 0x41, 0x0e, 0x00, 0x00,
126 138
 // })
127 139
 var testNet3GenesisHash = testNet3GenesisBlock.Header.BlockHash()
140
+
128 141
 // simNetGenesisHash is the hash of the first block in the block chain for the
129 142
 // simulation test network.
130 143
 var simNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{
@@ -133,8 +146,10 @@ var simNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{
133 146
 	0x85, 0x4d, 0xed, 0x22, 0x2c, 0x0c, 0x1b, 0xf9,
134 147
 	0x5e, 0xa3, 0xba, 0xec, 0x41, 0x0e, 0x00, 0x00,
135 148
 })
149
+
136 150
 // simNetGenesisMerkleRoot is the hash of the first transaction in the genesis block for the simulation test network.  It is the same as the merkle root for the main network.
137 151
 var simNetGenesisMerkleRoot = genesisMerkleRoot
152
+
138 153
 // simNetGenesisBlock defines the genesis block of the block chain which serves as the public transaction ledger for the simulation test network.
139 154
 var simNetGenesisBlock = wire.MsgBlock{
140 155
 	Header: wire.BlockHeader{

+ 12
- 0
pkg/chain/config/genesis_test.go

@@ -1,9 +1,14 @@
1
+//+build test
2
+
1 3
 package chaincfg
4
+
2 5
 import (
3 6
 	"bytes"
4 7
 	"testing"
8
+
5 9
 	"github.com/davecgh/go-spew/spew"
6 10
 )
11
+
7 12
 // TestGenesisBlock tests the genesis block of the main network for validity by checking the encoded bytes and hashes.
8 13
 func TestGenesisBlock(
9 14
 	t *testing.T) {
@@ -27,6 +32,7 @@ func TestGenesisBlock(
27 32
 			spew.Sdump(MainNetParams.GenesisHash))
28 33
 	}
29 34
 }
35
+
30 36
 // TestRegTestGenesisBlock tests the genesis block of the regression test network for validity by checking the encoded bytes and hashes.
31 37
 func TestRegTestGenesisBlock(
32 38
 	t *testing.T) {
@@ -51,6 +57,7 @@ func TestRegTestGenesisBlock(
51 57
 			spew.Sdump(RegressionNetParams.GenesisHash))
52 58
 	}
53 59
 }
60
+
54 61
 // TestTestNet3GenesisBlock tests the genesis block of the test network (version 3) for validity by checking the encoded bytes and hashes.
55 62
 func TestTestNet3GenesisBlock(
56 63
 	t *testing.T) {
@@ -75,6 +82,7 @@ func TestTestNet3GenesisBlock(
75 82
 			spew.Sdump(TestNet3Params.GenesisHash))
76 83
 	}
77 84
 }
85
+
78 86
 // TestSimNetGenesisBlock tests the genesis block of the simulation test network for validity by checking the encoded bytes and hashes.
79 87
 func TestSimNetGenesisBlock(
80 88
 	t *testing.T) {
@@ -99,6 +107,7 @@ func TestSimNetGenesisBlock(
99 107
 			spew.Sdump(SimNetParams.GenesisHash))
100 108
 	}
101 109
 }
110
+
102 111
 // genesisBlockBytes are the wire encoded bytes for the genesis block of the main network as of protocol version 60002.
103 112
 var genesisBlockBytes = []byte{
104 113
 	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */
@@ -138,6 +147,7 @@ var genesisBlockBytes = []byte{
138 147
 	0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/
139 148
 	0xac, 0x00, 0x00, 0x00, 0x00, /* |.....|    */
140 149
 }
150
+
141 151
 // regTestGenesisBlockBytes are the wire encoded bytes for the genesis block of the regression test network as of protocol version 60002.
142 152
 var regTestGenesisBlockBytes = []byte{
143 153
 	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */
@@ -177,6 +187,7 @@ var regTestGenesisBlockBytes = []byte{
177 187
 	0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/
178 188
 	0xac, 0x00, 0x00, 0x00, 0x00, /* |.....|    */
179 189
 }
190
+
180 191
 // testNet3GenesisBlockBytes are the wire encoded bytes for the genesis block of the test network (version 3) as of protocol version 60002.
181 192
 var testNet3GenesisBlockBytes = []byte{
182 193
 	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */
@@ -216,6 +227,7 @@ var testNet3GenesisBlockBytes = []byte{
216 227
 	0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, 0x1d, 0x5f, /* |.Lp+k.._|*/
217 228
 	0xac, 0x00, 0x00, 0x00, 0x00, /* |.....|    */
218 229
 }
230
+
219 231
 // simNetGenesisBlockBytes are the wire encoded bytes for the genesis block of the simulation test network as of protocol version 70002.
220 232
 var simNetGenesisBlockBytes = []byte{
221 233
 	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* |........| */

+ 9
- 1
pkg/chain/config/params-defs.go

@@ -1,17 +1,20 @@
1 1
 package chaincfg
2
+
2 3
 import (
3 4
 	"encoding/hex"
4 5
 	"errors"
5 6
 	"math/big"
6 7
 	"time"
8
+
7 9
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
8 10
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
9 11
 )
12
+
10 13
 var (
11 14
 	// ErrDuplicateNet describes an error where the parameters for a Bitcoin network could not be set due to the network already being a standard network or previously-registered into this package.
12 15
 	ErrDuplicateNet = errors.New("duplicate Bitcoin network")
13 16
 	// ErrUnknownHDKeyID describes an error where the provided id which is intended to identify the network for a hierarchical deterministic private extended key is not registered.
14
-	ErrUnknownHDKeyID = errors.New("unknown hd private extended key bytes")
17
+	ErrUnknownHDKeyID    = errors.New("unknown hd private extended key bytes")
15 18
 	registeredNets       = make(map[wire.BitcoinNet]struct{})
16 19
 	pubKeyHashAddrIDs    = make(map[byte]struct{})
17 20
 	scriptHashAddrIDs    = make(map[byte]struct{})
@@ -79,11 +82,13 @@ var (
79 82
 	// TestnetTargetTimespan is the base for adjustment
80 83
 	TestnetTargetTimespan = TestnetInterval * TestnetTargetTimePerBlock
81 84
 )
85
+
82 86
 // Checkpoint identifies a known good point in the block chain.  Using checkpoints allows a few optimizations for old blocks during initial download and also prevents forks from old blocks. Each checkpoint is selected based upon several factors.  See the documentation for blockchain.IsCheckpointCandidate for details on the selection criteria.
83 87
 type Checkpoint struct {
84 88
 	Height int32
85 89
 	Hash   *chainhash.Hash
86 90
 }
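
As a small illustration of the Checkpoint type above, a hard-coded checkpoint list inside this package might look like the following; the height and hash are placeholders rather than real checkpoint data, and newHashFromStr is the panicking helper defined in params.go.

// exampleCheckpoints is purely illustrative; real entries would come from
// blocks that satisfy blockchain.IsCheckpointCandidate.
var exampleCheckpoints = []Checkpoint{
	{Height: 100000, Hash: newHashFromStr(
		"00000000000000000000000000000000000000000000000000000000000000ff")},
}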
91
+
87 92
 // DNSSeed identifies a DNS seed.
88 93
 type DNSSeed struct {
89 94
 	// Host defines the hostname of the seed.
@@ -91,6 +96,7 @@ type DNSSeed struct {
91 96
 	// HasFiltering defines whether the seed supports filtering by service flags (wire.ServiceFlag).
92 97
 	HasFiltering bool
93 98
 }
99
+
94 100
 // ConsensusDeployment defines details related to a specific consensus rule change that is voted in.  This is part of BIP0009.
95 101
 type ConsensusDeployment struct {
96 102
 	// BitNumber defines the specific bit number within the block version this particular soft-fork deployment refers to.
@@ -100,6 +106,7 @@ type ConsensusDeployment struct {
100 106
 	// ExpireTime is the median block time after which the attempted deployment expires.
101 107
 	ExpireTime uint64
102 108
 }
109
+
103 110
 // Constants that define the deployment offset in the deployments field of the parameters for each deployment.  This makes it possible to look up the details of a specific deployment by name.
104 111
 const (
105 112
 	// DeploymentTestDummy defines the rule change deployment ID for testing purposes.
@@ -111,6 +118,7 @@ const (
111 118
 	// DefinedDeployments is the number of currently defined deployments.  NOTE: it must always come last since it is used to determine how many defined deployments there currently are.
112 119
 	DefinedDeployments
113 120
 )
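
Given the offsets above, the details of a specific deployment can be read by indexing a network's deployment list. The in-package sketch below assumes Params exposes a Deployments array indexed by these constants, as btcd's chaincfg does; that field is not shown in this diff.

// dummyDeploymentExpiry is illustrative: it returns the expiry time of the
// test-dummy deployment for the given network.
func dummyDeploymentExpiry(params *Params) uint64 {
	return params.Deployments[DeploymentTestDummy].ExpireTime
}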
121
+
114 122
 // Params defines a Bitcoin network by its parameters.  These parameters may be used by Bitcoin applications to differentiate networks as well as addresses and keys for one network from those intended for use on another network.
115 123
 type Params struct {
116 124
 	// Name defines a human-readable identifier for the network.

+ 2
- 0
pkg/chain/config/params-mainnet.go

@@ -1,5 +1,7 @@
1 1
 package chaincfg
2
+
2 3
 import "git.parallelcoin.io/dev/9/pkg/chain/wire"
4
+
3 5
 // MainNetParams defines the network parameters for the main Bitcoin network.
4 6
 var MainNetParams = Params{
5 7
 	Name:        "mainnet",

+ 3
- 0
pkg/chain/config/params-regtest.go

@@ -1,8 +1,11 @@
1 1
 package chaincfg
2
+
2 3
 import (
3 4
 	"math"
5
+
4 6
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
5 7
 )
8
+
6 9
 // RegressionNetParams defines the network parameters for the regression test Bitcoin network.  Not to be confused with the test Bitcoin network (version 3), this network is sometimes simply called "regtest".
7 10
 var RegressionNetParams = Params{
8 11
 	Name:        "regtest",

+ 3
- 0
pkg/chain/config/params-simnet.go

@@ -1,9 +1,12 @@
1 1
 package chaincfg
2
+
2 3
 import (
3 4
 	"math"
4 5
 	"time"
6
+
5 7
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
6 8
 )
9
+
7 10
 // SimNetParams defines the network parameters for the simulation test Bitcoin network.  This network is similar to the normal test network except it is intended for private use within a group of individuals doing simulation testing.  The functionality differs in that only nodes that are specifically listed are used to create the network, rather than following the normal discovery rules.  This is important as otherwise it would just turn into another public testnet.
8 11
 var SimNetParams = Params{
9 12
 	Name:        "simnet",

+ 2
- 0
pkg/chain/config/params-testnet.go

@@ -1,8 +1,10 @@
1 1
 package chaincfg
2
+
2 3
 import (
3 4
 	"git.parallelcoin.io/dev/9/pkg/chain/fork"
4 5
 	"git.parallelcoin.io/dev/9/pkg/chain/wire"
5 6
 )
7
+
6 8
 // TestNet3Params defines the network parameters for the test Bitcoin network (version 3).  Not to be confused with the regression test network, this network is sometimes simply called "testnet".
7 9
 var TestNet3Params = Params{
8 10
 	Name:        "testnet",

+ 12
- 0
pkg/chain/config/params.go

@@ -1,13 +1,17 @@
1 1
 package chaincfg
2
+
2 3
 import (
3 4
 	"math/big"
4 5
 	"strings"
6
+
5 7
 	chainhash "git.parallelcoin.io/dev/9/pkg/chain/hash"
6 8
 )
9
+
7 10
 // String returns the hostname of the DNS seed in human-readable form.
8 11
 func (d DNSSeed) String() string {
9 12
 	return d.Host
10 13
 }
14
+
11 15
 // Register registers the network parameters for a Bitcoin network.  This may error with ErrDuplicateNet if the network is already registered (either due to a previous Register call, or the network being one of the default networks). Network parameters should be registered into this package by a main package as early as possible.  Then, library packages may look up networks or network parameters based on inputs and work regardless of whether the network is standard or not.
12 16
 func Register(
13 17
 	params *Params) error {
@@ -22,6 +26,7 @@ func Register(
22 26
 	bech32SegwitPrefixes[params.Bech32HRPSegwit+"1"] = struct{}{}
23 27
 	return nil
24 28
 }
29
+
25 30
 // mustRegister performs the same function as Register except it panics if there is an error.  This should only be called from package init functions.
26 31
 func mustRegister(
27 32
 	params *Params) {
@@ -29,18 +34,21 @@ func mustRegister(
29 34
 		panic("failed to register network: " + err.Error())
30 35
 	}
31 36
 }
37
+
32 38
 // IsPubKeyHashAddrID returns whether the id is an identifier known to prefix a pay-to-pubkey-hash address on any default or registered network.  This is used when decoding an address string into a specific address type.  It is up to the caller to check both this and IsScriptHashAddrID and decide whether an address is a pubkey hash address, script hash address, neither, or undeterminable (if both return true).
33 39
 func IsPubKeyHashAddrID(
34 40
 	id byte) bool {
35 41
 	_, ok := pubKeyHashAddrIDs[id]
36 42
 	return ok
37 43
 }
44
+
38 45
 // IsScriptHashAddrID returns whether the id is an identifier known to prefix a pay-to-script-hash address on any default or registered network.  This is used when decoding an address string into a specific address type.  It is up to the caller to check both this and IsPubKeyHashAddrID and decide whether an address is a pubkey hash address, script hash address, neither, or undeterminable (if both return true).
39 46
 func IsScriptHashAddrID(
40 47
 	id byte) bool {
41 48
 	_, ok := scriptHashAddrIDs[id]
42 49
 	return ok
43 50
 }
51
+
44 52
 // IsBech32SegwitPrefix returns whether the prefix is a known prefix for segwit addresses on any default or registered network.  This is used when decoding an address string into a specific address type.
45 53
 func IsBech32SegwitPrefix(
46 54
 	prefix string) bool {
@@ -48,6 +56,7 @@ func IsBech32SegwitPrefix(
48 56
 	_, ok := bech32SegwitPrefixes[prefix]
49 57
 	return ok
50 58
 }
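
The three predicates above are typically combined when decoding an address string. Below is a minimal in-package sketch of the decision described in the comments; the id would normally come from the version byte of a decoded address.

// classifyAddrID reports how a decoded address version byte should be
// interpreted, including the ambiguous case where both maps match.
func classifyAddrID(id byte) string {
	isPKH := IsPubKeyHashAddrID(id)
	isSH := IsScriptHashAddrID(id)
	switch {
	case isPKH && isSH:
		return "undeterminable" // registered as both prefix kinds
	case isPKH:
		return "pubkey hash"
	case isSH:
		return "script hash"
	default:
		return "unknown"
	}
}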
59
+
51 60
 // HDPrivateKeyToPublicKeyID accepts a private hierarchical deterministic extended key id and returns the associated public key id.  When the provided id is not registered, the ErrUnknownHDKeyID error will be returned.
52 61
 func HDPrivateKeyToPublicKeyID(
53 62
 	id []byte) ([]byte, error) {
@@ -62,6 +71,7 @@ func HDPrivateKeyToPublicKeyID(
62 71
 	}
63 72
 	return pubBytes, nil
64 73
 }
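
For example, mapping a private extended-key prefix to its public counterpart could look like the sketch below, which assumes the standard Params expose an HDPrivateKeyID [4]byte field as in btcd's chaincfg; that field is not shown in this diff.

// mainNetHDPublicKeyID is illustrative: it converts mainnet's private HD key
// prefix into the matching public prefix, panicking on ErrUnknownHDKeyID.
func mainNetHDPublicKeyID() []byte {
	pubID, err := HDPrivateKeyToPublicKeyID(MainNetParams.HDPrivateKeyID[:])
	if err != nil {
		panic(err)
	}
	return pubID
}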
74
+
65 75
 // newHashFromStr converts the passed big-endian hex string into a chainhash.Hash.  It only differs from the one available in chainhash in that it panics on an error since it will only (and must only) be called with hard-coded, and therefore known good, hashes.
66 76
 func newHashFromStr(
67 77
 	hexStr string) *chainhash.Hash {
@@ -80,6 +90,7 @@ func init() {
80 90
 	mustRegister(&RegressionNetParams)
81 91
 	mustRegister(&SimNetParams)
82 92
 }
93
+
83 94
 // CompactToBig converts a compact representation of a whole number N (packed into an unsigned 32-bit number) to a big integer.  The compact representation is similar to IEEE754 floating point numbers. Like IEEE754 floating point, there are three basic components: the sign, the exponent, and the mantissa.  They are broken out as follows:
84 95
 //	* the most significant 8 bits represent the unsigned base 256 exponent
85 96
 // 	* bit 23 (the 24th bit) represents the sign bit
@@ -113,6 +124,7 @@ func CompactToBig(
113 124
 	}
114 125
 	return bn
115 126
 }
127
+
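
To make the compact encoding concrete, the standalone sketch below decodes the well-known difficulty bits 0x1d00ffff by hand; it does not touch this package, so nothing about CompactToBig's exact signature is assumed.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Decode compact bits 0x1d00ffff: the high byte is the base-256
	// exponent, the low 23 bits are the mantissa (the sign bit is clear).
	compact := uint32(0x1d00ffff)
	exponent := uint(compact >> 24)  // 0x1d = 29
	mantissa := compact & 0x007fffff // 0x00ffff
	// N = mantissa * 256^(exponent-3) when the exponent is greater than 3.
	n := new(big.Int).Lsh(big.NewInt(int64(mantissa)), 8*(exponent-3))
	// Prints the familiar proof-of-work limit 00000000ffff0000...0000.
	fmt.Printf("%064x\n", n)
}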