Line data Source code
1 : // Copyright (c) 2009-2010 Satoshi Nakamoto
2 : // Copyright (c) 2009-2020 The Bitcoin Core developers
3 : // Distributed under the MIT software license, see the accompanying
4 : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 :
6 : #include <validation.h>
7 :
8 : #include <arith_uint256.h>
9 : #include <chain.h>
10 : #include <chainparams.h>
11 : #include <checkqueue.h>
12 : #include <consensus/consensus.h>
13 : #include <consensus/merkle.h>
14 : #include <consensus/tx_check.h>
15 : #include <consensus/tx_verify.h>
16 : #include <consensus/validation.h>
17 : #include <cuckoocache.h>
18 : #include <flatfile.h>
19 : #include <hash.h>
20 : #include <index/txindex.h>
21 : #include <logging.h>
22 : #include <logging/timer.h>
23 : #include <node/ui_interface.h>
24 : #include <optional.h>
25 : #include <policy/fees.h>
26 : #include <policy/policy.h>
27 : #include <policy/settings.h>
28 : #include <pow.h>
29 : #include <primitives/block.h>
30 : #include <primitives/transaction.h>
31 : #include <random.h>
32 : #include <reverse_iterator.h>
33 : #include <script/script.h>
34 : #include <script/sigcache.h>
35 : #include <shutdown.h>
36 : #include <timedata.h>
37 : #include <tinyformat.h>
38 : #include <txdb.h>
39 : #include <txmempool.h>
40 : #include <uint256.h>
41 : #include <undo.h>
42 : #include <util/check.h> // For NDEBUG compile time check
43 : #include <util/moneystr.h>
44 : #include <util/rbf.h>
45 : #include <util/strencodings.h>
46 : #include <util/system.h>
47 : #include <util/translation.h>
48 : #include <validationinterface.h>
49 : #include <warnings.h>
50 :
51 : #include <string>
52 :
53 : #include <boost/algorithm/string/replace.hpp>
54 :
55 : #define MICRO 0.000001
56 : #define MILLI 0.001
57 :
58 : /**
59 : * An extra transaction can be added to a package, as long as it only has one
60 : * ancestor and is no larger than this. Not really any reason to make this
61 : * configurable as it doesn't materially change DoS parameters.
62 : */
63 : static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
64 : /** Maximum kilobytes for transactions to store for processing during reorg */
65 : static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
66 : /** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
67 : static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
68 : /** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
69 : static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
70 : /** Time to wait between writing blocks/block index to disk. */
71 : static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
72 : /** Time to wait between flushing chainstate to disk. */
73 : static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
74 : /** Maximum age of our tip for us to be considered current for fee estimation */
75 : static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
76 3840 : const std::vector<std::string> CHECKLEVEL_DOC {
77 640 : "level 0 reads the blocks from disk",
78 640 : "level 1 verifies block validity",
79 640 : "level 2 verifies undo data",
80 640 : "level 3 checks disconnection of tip blocks",
81 640 : "level 4 tries to reconnect the blocks",
82 640 : "each level includes the checks of the previous levels",
83 : };
84 :
85 500726346 : bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
86 : // First sort by most total work, ...
87 500726346 : if (pa->nChainWork > pb->nChainWork) return false;
88 286282475 : if (pa->nChainWork < pb->nChainWork) return true;
89 :
90 : // ... then by earliest time received, ...
91 1748114 : if (pa->nSequenceId < pb->nSequenceId) return false;
92 1735911 : if (pa->nSequenceId > pb->nSequenceId) return true;
93 :
94 : // Use pointer address as tie breaker (should only happen with blocks
95 : // loaded from disk, as those all have id 0).
96 1730275 : if (pa < pb) return false;
97 1730270 : if (pa > pb) return true;
98 :
99 : // Identical blocks.
100 1730254 : return false;
101 500726346 : }
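// Illustration (sketch, not part of the original file): because this
// comparator returns true only when pa ranks strictly below pb, a std::set
// keyed on CBlockIndexWorkComparator stores entries in ascending order of
// chain work, with ties broken so that the earliest-received block sorts
// last. The best candidate is therefore the final element:
//
//   std::set<CBlockIndex*, CBlockIndexWorkComparator> candidates;
//   // ... insert block indexes ...
//   CBlockIndex* best = candidates.empty() ? nullptr : *candidates.rbegin();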
102 :
103 640 : ChainstateManager g_chainman;
104 :
105 5291925 : CChainState& ChainstateActive()
106 : {
107 5291925 : LOCK(::cs_main);
108 5291925 : assert(g_chainman.m_active_chainstate);
109 : return *g_chainman.m_active_chainstate;
110 5291934 : }
111 :
112 3465754 : CChain& ChainActive()
113 : {
114 3465754 : LOCK(::cs_main);
115 3465754 : return ::ChainstateActive().m_chain;
116 3465754 : }
117 :
118 : /**
119 : * Mutex to guard access to validation specific variables, such as reading
120 : * or changing the chainstate.
121 : *
122 : * This may also need to be locked when updating the transaction pool, e.g. on
123 : * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
124 : *
125 : * The transaction pool has a separate lock to allow reading from it and the
126 : * chainstate at the same time.
127 : */
128 640 : RecursiveMutex cs_main;
129 :
130 : CBlockIndex *pindexBestHeader = nullptr;
131 640 : Mutex g_best_block_mutex;
132 640 : std::condition_variable g_best_block_cv;
133 640 : uint256 g_best_block;
134 : bool g_parallel_script_checks{false};
135 : std::atomic_bool fImporting(false);
136 : std::atomic_bool fReindex(false);
137 : bool fHavePruned = false;
138 : bool fPruneMode = false;
139 : bool fRequireStandard = true;
140 : bool fCheckBlockIndex = false;
141 : bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
142 : uint64_t nPruneTarget = 0;
143 : int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
144 :
145 640 : uint256 hashAssumeValid;
146 640 : arith_uint256 nMinimumChainWork;
147 :
148 640 : CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
149 :
150 640 : CBlockPolicyEstimator feeEstimator;
151 :
152 : // Internal stuff
153 : namespace {
154 : CBlockIndex* pindexBestInvalid = nullptr;
155 :
156 640 : RecursiveMutex cs_LastBlockFile;
157 640 : std::vector<CBlockFileInfo> vinfoBlockFile;
158 : int nLastBlockFile = 0;
159 : /** Global flag to indicate we should check to see if there are
160 : * block/undo files that should be deleted. Set on startup
161 : * or if we allocate more file space when we're in prune mode
162 : */
163 : bool fCheckForPruning = false;
164 :
165 : /** Dirty block index entries. */
166 640 : std::set<CBlockIndex*> setDirtyBlockIndex;
167 :
168 : /** Dirty block file entries. */
169 640 : std::set<int> setDirtyFileInfo;
170 : } // anon namespace
171 :
172 493802 : CBlockIndex* LookupBlockIndex(const uint256& hash)
173 : {
174 493802 : AssertLockHeld(cs_main);
175 493802 : BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash);
176 493802 : return it == g_chainman.BlockIndex().end() ? nullptr : it->second;
177 493802 : }
178 :
179 1991 : CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
180 : {
181 1991 : AssertLockHeld(cs_main);
182 :
183 : // Find the latest block common to locator and chain - we expect that
184 : // locator.vHave is sorted descending by height.
185 4287 : for (const uint256& hash : locator.vHave) {
186 2296 : CBlockIndex* pindex = LookupBlockIndex(hash);
187 2296 : if (pindex) {
188 1989 : if (chain.Contains(pindex))
189 1954 : return pindex;
190 35 : if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
191 11 : return chain.Tip();
192 : }
193 : }
194 662 : }
195 26 : return chain.Genesis();
196 1991 : }
197 :
198 640 : std::unique_ptr<CBlockTreeDB> pblocktree;
199 :
200 : // See definition for documentation
201 : static void FindFilesToPruneManual(ChainstateManager& chainman, std::set<int>& setFilesToPrune, int nManualPruneHeight);
202 : static void FindFilesToPrune(ChainstateManager& chainman, std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
203 : bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
204 : static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
205 : static FlatFileSeq BlockFileSeq();
206 : static FlatFileSeq UndoFileSeq();
207 :
208 1062192 : bool CheckFinalTx(const CTransaction &tx, int flags)
209 : {
210 1062192 : AssertLockHeld(cs_main);
211 :
212 : // By convention a negative value for flags indicates that the
213 : // current network-enforced consensus rules should be used. In
214 : // a future soft-fork scenario that would mean checking which
215 : // rules would be enforced for the next block and setting the
216 : // appropriate flags. At the present time no soft-forks are
217 : // scheduled, so no flags are set.
218 1062192 : flags = std::max(flags, 0);
219 :
220 : // CheckFinalTx() uses ::ChainActive().Height()+1 to evaluate
221 : // nLockTime because when IsFinalTx() is called within
222 : // CBlock::AcceptBlock(), the height of the block *being*
223 : // evaluated is what is used. Thus if we want to know if a
224 : // transaction can be part of the *next* block, we need to call
225 : // IsFinalTx() with one more than ::ChainActive().Height().
226 1062192 : const int nBlockHeight = ::ChainActive().Height() + 1;
227 :
228 : // BIP113 requires that time-locked transactions have nLockTime set to
229 : // less than the median time of the previous block they're contained in.
230 : // When the next block is created its previous block will be the current
231 : // chain tip, so we use that to calculate the median time passed to
232 : // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
233 1062192 : const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
234 23768 : ? ::ChainActive().Tip()->GetMedianTimePast()
235 1038424 : : GetAdjustedTime();
236 :
237 1062192 : return IsFinalTx(tx, nBlockHeight, nBlockTime);
238 : }
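// Worked example of the height+1 convention above (illustrative): with the
// active chain at height 100, IsFinalTx() is evaluated at nBlockHeight 101,
// so a height-locked transaction with nLockTime == 100 can go into the next
// block (100 < 101), while one with nLockTime == 101 cannot, unless every
// input's nSequence is SEQUENCE_FINAL.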
239 :
240 734 : bool TestLockPointValidity(const LockPoints* lp)
241 : {
242 734 : AssertLockHeld(cs_main);
243 734 : assert(lp);
244 : // If there are relative lock times then the maxInputBlock will be set
245 : // If there are no relative lock times, the LockPoints don't depend on the chain
246 734 : if (lp->maxInputBlock) {
247 : // Check whether ::ChainActive() is an extension of the block at which the LockPoints
248 : // calculation was valid. If not LockPoints are no longer valid
249 734 : if (!::ChainActive().Contains(lp->maxInputBlock)) {
250 2 : return false;
251 : }
252 : }
253 :
254 : // LockPoints still valid
255 732 : return true;
256 734 : }
257 :
258 20119 : bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints)
259 : {
260 20119 : AssertLockHeld(cs_main);
261 20119 : AssertLockHeld(pool.cs);
262 :
263 20119 : CBlockIndex* tip = ::ChainActive().Tip();
264 20119 : assert(tip != nullptr);
265 :
266 20119 : CBlockIndex index;
267 20119 : index.pprev = tip;
268 : // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate
269 : // height based locks because when SequenceLocks() is called within
270 : // ConnectBlock(), the height of the block *being*
271 : // evaluated is what is used.
272 : // Thus if we want to know if a transaction can be part of the
273 : // *next* block, we need to use one more than ::ChainActive().Height()
274 20119 : index.nHeight = tip->nHeight + 1;
275 :
276 20119 : std::pair<int, int64_t> lockPair;
277 20119 : if (useExistingLockPoints) {
278 730 : assert(lp);
279 730 : lockPair.first = lp->height;
280 730 : lockPair.second = lp->time;
281 730 : }
282 : else {
283 : // CoinsTip() contains the UTXO set for ::ChainActive().Tip()
284 19389 : CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool);
285 19389 : std::vector<int> prevheights;
286 19389 : prevheights.resize(tx.vin.size());
287 57343 : for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
288 37954 : const CTxIn& txin = tx.vin[txinIndex];
289 37954 : Coin coin;
290 37954 : if (!viewMemPool.GetCoin(txin.prevout, coin)) {
291 0 : return error("%s: Missing input", __func__);
292 : }
293 37954 : if (coin.nHeight == MEMPOOL_HEIGHT) {
294 : // Assume all mempool transactions confirm in the next block
295 3406 : prevheights[txinIndex] = tip->nHeight + 1;
296 3406 : } else {
297 34548 : prevheights[txinIndex] = coin.nHeight;
298 : }
299 37954 : }
300 19389 : lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
301 19389 : if (lp) {
302 19381 : lp->height = lockPair.first;
303 19381 : lp->time = lockPair.second;
304 : // Also store the hash of the block with the highest height of
305 : // all the blocks which have sequence locked prevouts.
306 : // This hash needs to still be on the chain
307 : // for these LockPoint calculations to be valid
308 : // Note: It is impossible to correctly calculate a maxInputBlock
309 : // if any of the sequence locked inputs depend on unconfirmed txs,
310 : // except in the special case where the relative lock time/height
311 : // is 0, which is equivalent to no sequence lock. Since we assume
312 : // input height of tip+1 for mempool txs and test the resulting
313 : // lockPair from CalculateSequenceLocks against tip+1, we know
314 : // EvaluateSequenceLocks will fail if there was a non-zero sequence
315 : // lock on a mempool input, so we can use the return value of
316 : // CheckSequenceLocks to indicate the LockPoints validity
317 19381 : int maxInputHeight = 0;
318 57327 : for (const int height : prevheights) {
319 : // Can ignore mempool inputs since we'll fail if they had non-zero locks
320 37946 : if (height != tip->nHeight+1) {
321 37087 : maxInputHeight = std::max(maxInputHeight, height);
322 37087 : }
323 37946 : }
324 19381 : lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
325 19381 : }
326 19389 : }
327 20119 : return EvaluateSequenceLocks(index, lockPair);
328 20119 : }
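// Illustrative consequence of the tip+1 evaluation above: an unconfirmed
// mempool parent is treated as confirming at height tip+1, so a child that
// places any non-zero BIP68 relative height lock on that input computes a
// minimum height of at least tip+1 and fails EvaluateSequenceLocks() until
// the parent is actually mined; a relative lock of zero imposes no
// constraint and passes.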
329 :
330 : // Returns the script flags which should be checked for a given block
331 : static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
332 :
333 19028 : static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::seconds age)
334 : EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main)
335 : {
336 19028 : int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
337 19028 : if (expired != 0) {
338 2 : LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
339 : }
340 :
341 19028 : std::vector<COutPoint> vNoSpendsRemaining;
342 19028 : pool.TrimToSize(limit, &vNoSpendsRemaining);
343 19054 : for (const COutPoint& removed : vNoSpendsRemaining)
344 26 : ::ChainstateActive().CoinsTip().Uncache(removed);
345 19028 : }
346 :
347 18432 : static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
348 : {
349 18432 : AssertLockHeld(cs_main);
350 18432 : if (::ChainstateActive().IsInitialBlockDownload())
351 171 : return false;
352 18261 : if (::ChainActive().Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
353 104 : return false;
354 18157 : if (::ChainActive().Height() < pindexBestHeader->nHeight - 1)
355 0 : return false;
356 18157 : return true;
357 18432 : }
358 :
359 : /* Make mempool consistent after a reorg, by re-adding or recursively erasing
360 : * disconnected block transactions from the mempool, and also removing any
361 : * other transactions from the mempool that are no longer valid given the new
362 : * tip/height.
363 : *
364 : * Note: we assume that disconnectpool only contains transactions that are NOT
365 : * confirmed in the current chain nor already in the mempool (otherwise,
366 : * in-mempool descendants of such transactions would be removed).
367 : *
368 : * Passing fAddToMempool=false will skip trying to add the transactions back,
369 : * and instead just erase from the mempool as needed.
370 : */
371 :
372 488 : static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs)
373 : {
374 488 : AssertLockHeld(cs_main);
375 488 : AssertLockHeld(mempool.cs);
376 488 : std::vector<uint256> vHashUpdate;
377 : // disconnectpool's insertion_order index sorts the entries from
378 : // oldest to newest, but the oldest entry will be the last tx from the
379 : // latest mined block that was disconnected.
380 : // Iterate disconnectpool in reverse, so that we add transactions
381 : // back to the mempool starting with the earliest transaction that had
382 : // been previously seen in a block.
383 488 : auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
384 6531 : while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
385 : // ignore validation errors in resurrected transactions
386 6043 : TxValidationState stateDummy;
387 10005 : if (!fAddToMempool || (*it)->IsCoinBase() ||
388 3962 : !AcceptToMemoryPool(mempool, stateDummy, *it,
389 : nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
390 : // If the transaction doesn't make it in to the mempool, remove any
391 : // transactions that depend on it (which would now be orphans).
392 5761 : mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
393 282 : } else if (mempool.exists((*it)->GetHash())) {
394 282 : vHashUpdate.push_back((*it)->GetHash());
395 : }
396 6043 : ++it;
397 6043 : }
398 488 : disconnectpool.queuedTx.clear();
399 : // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
400 : // no in-mempool children, which is generally not true when adding
401 : // previously-confirmed transactions back to the mempool.
402 : // UpdateTransactionsFromBlock finds descendants of any transactions in
403 : // the disconnectpool that were added back and cleans up the mempool state.
404 488 : mempool.UpdateTransactionsFromBlock(vHashUpdate);
405 :
406 : // We also need to remove any now-immature transactions
407 488 : mempool.removeForReorg(&::ChainstateActive().CoinsTip(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
408 : // Re-limit mempool size, in case we added any transactions
409 488 : LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
410 488 : }
411 :
412 : // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
413 : // were somehow broken and returning the wrong scriptPubKeys
414 18837 : static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
415 : unsigned int flags, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
416 18837 : AssertLockHeld(cs_main);
417 :
418 : // pool.cs should be locked already, but go ahead and re-take the lock here
419 : // to enforce that mempool doesn't change between when we check the view
420 : // and when we actually call through to CheckInputScripts
421 18837 : LOCK(pool.cs);
422 :
423 18837 : assert(!tx.IsCoinBase());
424 53990 : for (const CTxIn& txin : tx.vin) {
425 35153 : const Coin& coin = view.AccessCoin(txin.prevout);
426 :
427 : // AcceptToMemoryPoolWorker has already checked that the coins are
428 : // available, so this shouldn't fail. If the inputs are not available
429 : // here then return false.
430 35153 : if (coin.IsSpent()) return false;
431 :
432 : // Check equivalence for available inputs.
433 35153 : const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
434 35153 : if (txFrom) {
435 3213 : assert(txFrom->GetHash() == txin.prevout.hash);
436 3213 : assert(txFrom->vout.size() > txin.prevout.n);
437 3213 : assert(txFrom->vout[txin.prevout.n] == coin.out);
438 : } else {
439 31940 : const Coin& coinFromDisk = ::ChainstateActive().CoinsTip().AccessCoin(txin.prevout);
440 31940 : assert(!coinFromDisk.IsSpent());
441 31940 : assert(coinFromDisk.out == coin.out);
442 0 : }
443 70306 : }
444 :
445 : // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
446 18837 : return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullScriptStore = */ true, txdata);
447 18837 : }
448 :
449 : namespace {
450 :
451 46642 : class MemPoolAccept
452 : {
453 : public:
454 46642 : MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
455 23321 : m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
456 23321 : m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
457 23321 : m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
458 46642 : m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {}
459 :
460 : // We put the arguments we're handed into a struct, so we can pass them
461 : // around easier.
462 : struct ATMPArgs {
463 : const CChainParams& m_chainparams;
464 : TxValidationState &m_state;
465 : const int64_t m_accept_time;
466 : std::list<CTransactionRef>* m_replaced_transactions;
467 : const bool m_bypass_limits;
468 : const CAmount& m_absurd_fee;
469 : /*
470 : * Return any outpoints which were not previously present in the coins
471 : * cache, but were added as a result of validating the tx for mempool
472 : * acceptance. This allows the caller to optionally remove the cache
473 : * additions if the associated transaction ends up being rejected by
474 : * the mempool.
475 : */
476 : std::vector<COutPoint>& m_coins_to_uncache;
477 : const bool m_test_accept;
478 : };
479 :
480 : // Single transaction acceptance
481 : bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
482 :
483 : private:
484 : // All the intermediate state that gets passed between the various levels
485 : // of checking a given transaction.
486 46642 : struct Workspace {
487 46642 : Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
488 603 : std::set<uint256> m_conflicts;
489 : CTxMemPool::setEntries m_all_conflicting;
490 603 : CTxMemPool::setEntries m_ancestors;
491 : std::unique_ptr<CTxMemPoolEntry> m_entry;
492 603 :
493 : bool m_replacement_transaction;
494 : CAmount m_modified_fees;
495 : CAmount m_conflicting_fees;
496 : size_t m_conflicting_size;
497 :
498 : const CTransactionRef& m_ptx;
499 : const uint256& m_hash;
500 : };
501 :
502 : // Run the policy checks on a given transaction, excluding any script checks.
503 : // Looks up inputs, calculates feerate, considers replacement, evaluates
504 : // package limits, etc. As this function can be invoked for "free" by a peer,
505 : // only tests that are fast should be done here (to avoid CPU DoS).
506 603 : bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
507 :
508 : // Run the script checks using our policy flags. As this can be slow, we should
509 : // only invoke this on transactions that have otherwise passed policy checks.
510 : bool PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
511 :
512 : // Re-run the script checks, using consensus flags, and try to cache the
513 : // result in the scriptcache. This should be done after
514 : // PolicyScriptChecks(). This requires that all inputs either be in our
515 : // utxo set or in the mempool.
516 : bool ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
517 :
518 : // Try to add the transaction to the mempool, removing any conflicts first.
519 : // Returns true if the transaction is in the mempool after any size
520 : // limiting is performed, false otherwise.
521 : bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
522 :
523 : // Compare a package's feerate against minimum allowed.
524 18647 : bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state)
525 : {
526 18647 : CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
527 18647 : if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
528 1 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
529 : }
530 :
531 18646 : if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
532 8 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
533 : }
534 18638 : return true;
535 18647 : }
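// Worked example (illustrative; assumes the default minimum relay feerate
// DEFAULT_MIN_RELAY_TX_FEE of 1000 satoshis per 1000 virtual bytes): a
// 250-vbyte package must carry at least 250 satoshis in fees or CheckFeeRate()
// fails with "min relay fee not met". When the mempool approaches -maxmempool,
// GetMinFee() returns a higher dynamic feerate and the "mempool min fee not
// met" check rejects the package first.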
536 :
537 : private:
538 : CTxMemPool& m_pool;
539 : CCoinsViewCache m_view;
540 : CCoinsViewMemPool m_viewmempool;
541 : CCoinsView m_dummy;
542 :
543 : // The package limits in effect at the time of invocation.
544 : const size_t m_limit_ancestors;
545 : const size_t m_limit_ancestor_size;
546 : // These may be modified while evaluating a transaction (eg to account for
547 : // in-mempool conflicts; see below).
548 : size_t m_limit_descendants;
549 : size_t m_limit_descendant_size;
550 : };
551 :
552 23321 : bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
553 : {
554 23321 : const CTransactionRef& ptx = ws.m_ptx;
555 23321 : const CTransaction& tx = *ws.m_ptx;
556 23321 : const uint256& hash = ws.m_hash;
557 :
558 : // Copy/alias what we need out of args
559 23321 : TxValidationState &state = args.m_state;
560 23321 : const int64_t nAcceptTime = args.m_accept_time;
561 23321 : const bool bypass_limits = args.m_bypass_limits;
562 23321 : const CAmount& nAbsurdFee = args.m_absurd_fee;
563 23321 : std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
564 :
565 : // Alias what we need out of ws
566 23321 : std::set<uint256>& setConflicts = ws.m_conflicts;
567 23321 : CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
568 23321 : CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
569 23321 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
570 23321 : bool& fReplacementTransaction = ws.m_replacement_transaction;
571 23321 : CAmount& nModifiedFees = ws.m_modified_fees;
572 23321 : CAmount& nConflictingFees = ws.m_conflicting_fees;
573 23321 : size_t& nConflictingSize = ws.m_conflicting_size;
574 :
575 23321 : if (!CheckTransaction(tx, state)) {
576 12 : return false; // state filled in by CheckTransaction
577 : }
578 :
579 : // Coinbase is only valid in a block, not as a loose transaction
580 23309 : if (tx.IsCoinBase())
581 2 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
582 :
583 : // Rather not work on nonstandard transactions (unless -testnet/-regtest)
584 23307 : std::string reason;
585 23307 : if (fRequireStandard && !IsStandardTx(tx, reason))
586 875 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
587 :
588 : // Do not work on transactions that are too small.
589 603 : // A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
590 : // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
591 : // 64-byte transactions.
592 23035 : if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
593 6 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
594 :
595 : // Only accept nLockTime-using transactions that can be mined in the next
596 : // block; we don't want our mempool filled up with transactions that can't
597 : // be mined yet.
598 23029 : if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
599 76 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
600 :
601 : // is it already in the memory pool?
602 22953 : if (m_pool.exists(hash)) {
603 4 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
604 : }
605 :
606 : // Check for conflicts with in-memory transactions
607 67801 : for (const CTxIn &txin : tx.vin)
608 : {
609 44852 : const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
610 44852 : if (ptxConflicting) {
611 329 : if (!setConflicts.count(ptxConflicting->GetHash()))
612 : {
613 : // Allow opt-out of transaction replacement by setting
614 : // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
615 : //
616 : // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
617 : // non-replaceable transactions. Requiring this of all inputs rather than just one
618 : // is for the sake of multi-party protocols, where we don't
619 : // want a single party to be able to disable replacement.
620 : //
621 : // The opt-out ignores descendants as anyone relying on
622 : // first-seen mempool behavior should be checking all
623 : // unconfirmed ancestors anyway; doing otherwise is hopelessly
624 : // insecure.
625 : bool fReplacementOptOut = true;
626 655 : for (const CTxIn &_txin : ptxConflicting->vin)
627 : {
628 328 : if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
629 : {
630 : fReplacementOptOut = false;
631 323 : break;
632 : }
633 5 : }
634 327 : if (fReplacementOptOut) {
635 4 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
636 : }
637 :
638 323 : setConflicts.insert(ptxConflicting->GetHash());
639 323 : }
640 : }
641 89696 : }
642 :
643 22945 : LockPoints lp;
644 22945 : m_view.SetBackend(m_viewmempool);
645 :
646 22945 : CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
647 : // do all inputs exist?
648 64455 : for (const CTxIn& txin : tx.vin) {
649 41510 : if (!coins_cache.HaveCoinInCache(txin.prevout)) {
650 8983 : coins_to_uncache.push_back(txin.prevout);
651 : }
652 :
653 : // Note: this call may add txin.prevout to the coins cache
654 : // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
655 : // later (via coins_to_uncache) if this tx turns out to be invalid.
656 41510 : if (!m_view.HaveCoin(txin.prevout)) {
657 : // Are inputs missing because we already have the tx?
658 7144 : for (size_t out = 0; out < tx.vout.size(); out++) {
659 : // Optimistically just do efficient check of cache for outputs
660 3584 : if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
661 6 : return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
662 : }
663 : }
664 : // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
665 3560 : return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
666 : }
667 37944 : }
668 :
669 : // Bring the best block into scope
670 19379 : m_view.GetBestBlock();
671 :
672 : // we have all inputs cached now, so switch back to dummy (to protect
673 : // against bugs where we pull more inputs from disk that miss being added
674 : // to coins_to_uncache)
675 19379 : m_view.SetBackend(m_dummy);
676 :
677 : // Only accept BIP68 sequence locked transactions that can be mined in the next
678 : // block; we don't want our mempool filled up with transactions that can't
679 : // be mined yet.
680 : // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
681 : // CoinsViewCache instead of create its own
682 19379 : if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
683 378 : return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
684 :
685 19001 : CAmount nFees = 0;
686 19001 : if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
687 3 : return false; // state filled in by CheckTxInputs
688 : }
689 :
690 : // Check for non-standard pay-to-script-hash in inputs
691 18998 : if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
692 18 : return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
693 : }
694 :
695 : // Check for non-standard witness in P2WSH
696 18980 : if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
697 7 : return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
698 :
699 18973 : int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
700 :
701 : // nModifiedFees includes any fee deltas from PrioritiseTransaction
702 18973 : nModifiedFees = nFees;
703 18973 : m_pool.ApplyDelta(hash, nModifiedFees);
704 :
705 : // Keep track of transactions that spend a coinbase, which we re-scan
706 : // during reorgs to ensure COINBASE_MATURITY is still met.
707 : bool fSpendsCoinbase = false;
708 52810 : for (const CTxIn &txin : tx.vin) {
709 33837 : const Coin &coin = m_view.AccessCoin(txin.prevout);
710 33837 : if (coin.IsCoinBase()) {
711 : fSpendsCoinbase = true;
712 1186 : break;
713 : }
714 65302 : }
715 :
716 18973 : entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
717 18973 : fSpendsCoinbase, nSigOpsCost, lp));
718 18973 : unsigned int nSize = entry->GetTxSize();
719 :
720 18973 : if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
721 8 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
722 4 : strprintf("%d", nSigOpsCost));
723 :
724 : // No transactions are allowed below minRelayTxFee except from disconnected
725 : // blocks
726 18969 : if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
727 :
728 18960 : if (nAbsurdFee && nFees > nAbsurdFee)
729 8 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD,
730 4 : "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee));
731 :
732 18956 : const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
733 : // Calculate in-mempool ancestors, up to a limit.
734 18956 : if (setConflicts.size() == 1) {
735 : // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
736 : // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
737 : // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
738 : // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
739 : // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
740 : // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
741 : // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
742 : // for off-chain contract systems (see link in the comment below).
743 : //
744 : // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
745 : // conflict directly with exactly one other transaction (but may evict children of said transaction),
746 : // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
747 : // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
748 : // amended, we may need to move that check to here instead of removing it wholesale.
749 : //
750 : // Such transactions are clearly not merging any existing packages, so we are only concerned with
751 : // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
752 : // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
753 : // to.
754 : //
755 : // To check these we first check if we meet the RBF criteria, above, and increment the descendant
756 : // limits by the direct conflict and its descendants (as these are recalculated in
757 : // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
758 : // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
759 : // the ancestor limits should be the same for both our new transaction and any conflicts).
760 : // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
761 : // into force here (as we're only adding a single transaction).
762 122 : assert(setIterConflicting.size() == 1);
763 122 : CTxMemPool::txiter conflict = *setIterConflicting.begin();
764 :
765 122 : m_limit_descendants += 1;
766 122 : m_limit_descendant_size += conflict->GetSizeWithDescendants();
767 122 : }
768 :
769 18956 : std::string errString;
770 18956 : if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
771 67 : setAncestors.clear();
772 : // If CalculateMemPoolAncestors fails second time, we want the original error string.
773 67 : std::string dummy_err_string;
774 : // Contracting/payment channels CPFP carve-out:
775 : // If the new transaction is relatively small (up to 40k weight)
776 : // and has at most one ancestor (ie ancestor limit of 2, including
777 : // the new transaction), allow it if its parent has exactly the
778 : // descendant limit descendants.
779 : //
780 : // This allows protocols which rely on distrusting counterparties
781 : // being able to broadcast descendants of an unconfirmed transaction
782 : // to be secure by simply only having two immediately-spendable
783 : // outputs - one for each counterparty. For more info on the uses for
784 : // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
785 114 : if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
786 47 : !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
787 63 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
788 : }
789 67 : }
790 :
791 : // A transaction that spends outputs that would be replaced by it is invalid. Now
792 : // that we have the set of all ancestors we can detect this
793 : // pathological case by making sure setConflicts and setAncestors don't
794 : // intersect.
795 26100 : for (CTxMemPool::txiter ancestorIt : setAncestors)
796 : {
797 7207 : const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
798 7207 : if (setConflicts.count(hashAncestor))
799 : {
800 4 : return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
801 4 : strprintf("%s spends conflicting transaction %s",
802 2 : hash.ToString(),
803 2 : hashAncestor.ToString()));
804 : }
805 14412 : }
806 :
807 : // Check if it's economically rational to mine this transaction rather
808 : // than the ones it replaces.
809 18891 : nConflictingFees = 0;
810 18891 : nConflictingSize = 0;
811 18891 : uint64_t nConflictingCount = 0;
812 :
813 : // If we don't hold the lock, allConflicting might be incomplete; the
814 : // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
815 : // mempool consistency for us.
816 18891 : fReplacementTransaction = setConflicts.size();
817 18891 : if (fReplacementTransaction)
818 : {
819 122 : CFeeRate newFeeRate(nModifiedFees, nSize);
820 122 : std::set<uint256> setConflictsParents;
821 122 : const int maxDescendantsToVisit = 100;
822 443 : for (const auto& mi : setIterConflicting) {
823 : // Don't allow the replacement to reduce the feerate of the
824 : // mempool.
825 : //
826 : // We usually don't want to accept replacements with lower
827 : // feerates than what they replaced as that would lower the
828 : // feerate of the next block. Requiring that the feerate always
829 : // be increased is also an easy-to-reason about way to prevent
830 : // DoS attacks via replacements.
831 : //
832 : // We only consider the feerates of transactions being directly
833 : // replaced, not their indirect descendants. While that does
834 : // mean high feerate children are ignored when deciding whether
835 : // or not to replace, we do require the replacement to pay more
836 : // overall fees too, mitigating most cases.
837 321 : CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
838 321 : if (newFeeRate <= oldFeeRate)
839 : {
840 12 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
841 12 : strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
842 6 : hash.ToString(),
843 6 : newFeeRate.ToString(),
844 6 : oldFeeRate.ToString()));
845 : }
846 :
847 633 : for (const CTxIn &txin : mi->GetTx().vin)
848 : {
849 318 : setConflictsParents.insert(txin.prevout.hash);
850 : }
851 :
852 315 : nConflictingCount += mi->GetCountWithDescendants();
853 321 : }
854 : // This potentially overestimates the number of actual descendants
855 : // but we just want to be conservative to avoid doing too much
856 : // work.
857 116 : if (nConflictingCount <= maxDescendantsToVisit) {
858 : // If not too many to replace, then calculate the set of
859 : // transactions that would have to be evicted
860 325 : for (CTxMemPool::txiter it : setIterConflicting) {
861 212 : m_pool.CalculateDescendants(it, allConflicting);
862 0 : }
863 605 : for (CTxMemPool::txiter it : allConflicting) {
864 492 : nConflictingFees += it->GetModifiedFee();
865 492 : nConflictingSize += it->GetTxSize();
866 492 : }
867 : } else {
868 6 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
869 3 : strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
870 3 : hash.ToString(),
871 : nConflictingCount,
872 : maxDescendantsToVisit));
873 : }
874 :
875 341 : for (unsigned int j = 0; j < tx.vin.size(); j++)
876 : {
877 : // We don't want to accept replacements that require low
878 : // feerate junk to be mined first. Ideally we'd keep track of
879 : // the ancestor feerates and make the decision based on that,
880 : // but for now requiring all new inputs to be confirmed works.
881 : //
882 : // Note that if you relax this to make RBF a little more useful,
883 : // this may break the CalculateMempoolAncestors RBF relaxation,
884 : // above. See the comment above the first CalculateMempoolAncestors
885 : // call for more info.
886 228 : if (!setConflictsParents.count(tx.vin[j].prevout.hash))
887 : {
888 : // Rather than check the UTXO set - potentially expensive -
889 : // it's cheaper to just check if the new input refers to a
890 : // tx that's in the mempool.
891 13 : if (m_pool.exists(tx.vin[j].prevout.hash)) {
892 2 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
893 1 : strprintf("replacement %s adds unconfirmed input, idx %d",
894 1 : hash.ToString(), j));
895 : }
896 : }
897 : }
898 :
899 : // The replacement must pay greater fees than the transactions it
900 : // replaces - if we did the bandwidth used by those conflicting
901 : // transactions would not be paid for.
902 112 : if (nModifiedFees < nConflictingFees)
903 : {
904 4 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
905 4 : strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
906 2 : hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
907 : }
908 :
909 : // Finally in addition to paying more fees than the conflicts the
910 : // new transaction must pay for its own bandwidth.
911 110 : CAmount nDeltaFees = nModifiedFees - nConflictingFees;
912 110 : if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
913 : {
914 0 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
915 0 : strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
916 0 : hash.ToString(),
917 0 : FormatMoney(nDeltaFees),
918 0 : FormatMoney(::incrementalRelayFee.GetFee(nSize))));
919 : }
920 122 : }
921 18879 : return true;
922 23321 : }
923 :
924 18879 : bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
925 : {
926 18879 : const CTransaction& tx = *ws.m_ptx;
927 :
928 18879 : TxValidationState &state = args.m_state;
929 :
930 : constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
931 :
932 : // Check input scripts and signatures.
933 : // This is done last to help prevent CPU exhaustion denial-of-service attacks.
934 18879 : if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
935 : // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
936 : // need to turn both off, and compare against just turning off CLEANSTACK
937 : // to see if the failure is specifically due to witness validation.
938 42 : TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
939 51 : if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
940 9 : !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
941 : // Only the witness is missing, so the transaction itself may be fine.
942 18 : state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
943 9 : state.GetRejectReason(), state.GetDebugMessage());
944 9 : }
945 : return false; // state filled in by CheckInputScripts
946 42 : }
947 :
948 18837 : return true;
949 18879 : }
950 :
951 18837 : bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
952 : {
953 18837 : const CTransaction& tx = *ws.m_ptx;
954 18837 : const uint256& hash = ws.m_hash;
955 :
956 18837 : TxValidationState &state = args.m_state;
957 18837 : const CChainParams& chainparams = args.m_chainparams;
958 :
959 : // Check again against the current block tip's script verification
960 : // flags to cache our script execution flags. This is, of course,
961 : // useless if the next block has different script flags from the
962 : // previous one, but because the cache tracks script flags for us it
963 : // will auto-invalidate and we'll just have a few blocks of extra
964 : // misses on soft-fork activation.
965 : //
966 : // This is also useful in case of bugs in the standard flags that cause
967 : // transactions to pass as valid when they're actually invalid. For
968 : // instance the STRICTENC flag was incorrectly allowing certain
969 : // CHECKSIG NOT scripts to pass, even though they were invalid.
970 : //
971 : // There is a similar check in CreateNewBlock() to prevent creating
972 : // invalid blocks (using TestBlockValidity), however allowing such
973 : // transactions into the mempool can be exploited as a DoS attack.
974 18837 : unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
975 18837 : if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) {
976 0 : return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
977 0 : __func__, hash.ToString(), state.ToString());
978 : }
979 :
980 18837 : return true;
981 18837 : }
982 :
983 18825 : bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
984 : {
985 18825 : const CTransaction& tx = *ws.m_ptx;
986 18825 : const uint256& hash = ws.m_hash;
987 18825 : TxValidationState &state = args.m_state;
988 18825 : const bool bypass_limits = args.m_bypass_limits;
989 :
990 18825 : CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
991 18825 : CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
992 18825 : const CAmount& nModifiedFees = ws.m_modified_fees;
993 18825 : const CAmount& nConflictingFees = ws.m_conflicting_fees;
994 18825 : const size_t& nConflictingSize = ws.m_conflicting_size;
995 18825 : const bool fReplacementTransaction = ws.m_replacement_transaction;
996 18825 : std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
997 :
998 : // Remove conflicting transactions from the mempool
999 19173 : for (CTxMemPool::txiter it : allConflicting)
1000 : {
1001 348 : LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
1002 : it->GetTx().GetHash().ToString(),
1003 : hash.ToString(),
1004 : FormatMoney(nModifiedFees - nConflictingFees),
1005 : (int)entry->GetTxSize() - (int)nConflictingSize);
1006 348 : if (args.m_replaced_transactions)
1007 12 : args.m_replaced_transactions->push_back(it->GetSharedTx());
1008 348 : }
1009 18825 : m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
1010 :
1011 : // This transaction should only count for fee estimation if:
1012 : // - it isn't a BIP 125 replacement transaction (may not be widely supported)
1013 : // - it's not being re-added during a reorg which bypasses typical mempool fee limits
1014 : // - the node is not behind
1015 : // - the transaction is not dependent on any other transactions in the mempool
1016 18825 : bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx);
1017 :
1018 : // Store transaction in memory
1019 18825 : m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
1020 :
1021 : // trim mempool and check if tx was trimmed
1022 18825 : if (!bypass_limits) {
1023 18540 : LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
1024 18540 : if (!m_pool.exists(hash))
1025 0 : return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
1026 : }
1027 18825 : return true;
1028 18825 : }
1029 :
1030 23321 : bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
1031 : {
1032 23321 : AssertLockHeld(cs_main);
1033 23321 : LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
1034 :
1035 23321 : Workspace workspace(ptx);
1036 :
1037 23321 : if (!PreChecks(args, workspace)) return false;
1038 :
1039 : // Only compute the precomputed transaction data if we need to verify
1040 : // scripts (ie, other policy checks pass). We perform the inexpensive
1041 : // checks first and avoid hashing and signature verification unless those
1042 : // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
1043 18879 : PrecomputedTransactionData txdata;
1044 :
1045 18879 : if (!PolicyScriptChecks(args, workspace, txdata)) return false;
1046 :
1047 18837 : if (!ConsensusScriptChecks(args, workspace, txdata)) return false;
1048 :
1049 : // Tx was accepted, but not added
1050 18837 : if (args.m_test_accept) return true;
1051 :
1052 18825 : if (!Finalize(args, workspace)) return false;
1053 :
1054 18825 : GetMainSignals().TransactionAddedToMempool(ptx);
1055 :
1056 18825 : return true;
1057 23321 : }
1058 :
1059 : } // anon namespace
1060 :
1061 : /** (try to) add transaction to memory pool with a specified acceptance time **/
1062 23321 : static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
1063 : int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
1064 : bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1065 : {
1066 23321 : std::vector<COutPoint> coins_to_uncache;
1067 23321 : MemPoolAccept::ATMPArgs args { chainparams, state, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept };
1068 23321 : bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
1069 23321 : if (!res) {
1070 : // Remove coins that were not present in the coins cache before calling ATMPW;
1071 : // this is to prevent memory DoS in case we receive a large number of
1072 : // invalid transactions that attempt to overrun the in-memory coins cache
1073 : // (`CCoinsViewCache::cacheCoins`).
1074 :
1075 8247 : for (const COutPoint& hashTx : coins_to_uncache)
1076 3763 : ::ChainstateActive().CoinsTip().Uncache(hashTx);
1077 4484 : }
1078 : // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
1079 23321 : BlockValidationState state_dummy;
1080 23321 : ::ChainstateActive().FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC);
1081 : return res;
1082 23321 : }
1083 :
1084 23216 : bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
1085 : std::list<CTransactionRef>* plTxnReplaced,
1086 : bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
1087 : {
1088 23216 : const CChainParams& chainparams = Params();
1089 23216 : return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
1090 : }
1091 :
1092 675 : CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
1093 : {
1094 675 : LOCK(cs_main);
1095 :
1096 675 : if (block_index) {
1097 74 : CBlock block;
1098 74 : if (ReadBlockFromDisk(block, block_index, consensusParams)) {
1099 258 : for (const auto& tx : block.vtx) {
1100 184 : if (tx->GetHash() == hash) {
1101 73 : hashBlock = block_index->GetBlockHash();
1102 73 : return tx;
1103 : }
1104 111 : }
1105 : }
1106 1 : return nullptr;
1107 74 : }
1108 601 : if (mempool) {
1109 597 : CTransactionRef ptx = mempool->get(hash);
1110 597 : if (ptx) return ptx;
1111 597 : }
1112 11 : if (g_txindex) {
1113 8 : CTransactionRef tx;
1114 8 : if (g_txindex->FindTx(hash, hashBlock, tx)) return tx;
1115 8 : }
1116 3 : return nullptr;
1117 675 : }
1118 :
1119 : //////////////////////////////////////////////////////////////////////////////
1120 : //
1121 : // CBlock and CBlockIndex
1122 : //
1123 :
1124 44088 : static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
1125 : {
1126 : // Open history file to append
1127 44088 : CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
1128 44088 : if (fileout.IsNull())
1129 0 : return error("WriteBlockToDisk: OpenBlockFile failed");
1130 :
1131 : // Write index header
1132 44088 : unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
1133 44088 : fileout << messageStart << nSize;
1134 :
1135 : // Write block
1136 44088 : long fileOutPos = ftell(fileout.Get());
1137 44088 : if (fileOutPos < 0)
1138 0 : return error("WriteBlockToDisk: ftell failed");
1139 44088 : pos.nPos = (unsigned int)fileOutPos;
1140 44088 : fileout << block;
1141 :
1142 44088 : return true;
1143 44088 : }
1144 :
1145 98471 : bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
1146 : {
1147 98471 : block.SetNull();
1148 :
1149 : // Open history file to read
1150 98471 : CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
1151 98471 : if (filein.IsNull())
1152 108 : return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
1153 :
1154 : // Read block
1155 : try {
1156 98363 : filein >> block;
1157 0 : }
1158 : catch (const std::exception& e) {
1159 0 : return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
1160 0 : }
1161 :
1162 : // Check the header
1163 98361 : if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
1164 0 : return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
1165 :
1166 98363 : return true;
1167 98471 : }
1168 :
1169 98353 : bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
1170 : {
1171 98353 : FlatFilePos blockPos;
1172 : {
1173 98353 : LOCK(cs_main);
1174 98353 : blockPos = pindex->GetBlockPos();
1175 98353 : }
1176 :
1177 98353 : if (!ReadBlockFromDisk(block, blockPos, consensusParams))
1178 108 : return false;
1179 98245 : if (block.GetHash() != pindex->GetBlockHash())
1180 0 : return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1181 0 : pindex->ToString(), pindex->GetBlockPos().ToString());
1182 98245 : return true;
1183 98353 : }
1184 :
1185 6213 : bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
1186 : {
1187 6213 : FlatFilePos hpos = pos;
1188 6213 : hpos.nPos -= 8; // Seek back 8 bytes for meta header
1189 6213 : CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
1190 6213 : if (filein.IsNull()) {
1191 0 : return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
1192 : }
1193 :
1194 : try {
1195 6213 : CMessageHeader::MessageStartChars blk_start;
1196 6213 : unsigned int blk_size;
1197 :
1198 6213 : filein >> blk_start >> blk_size;
1199 :
1200 6213 : if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
1201 0 : return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
1202 0 : HexStr(blk_start),
1203 0 : HexStr(message_start));
1204 : }
1205 :
1206 6213 : if (blk_size > MAX_SIZE) {
1207 0 : return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
1208 : blk_size, MAX_SIZE);
1209 : }
1210 :
1211 6213 : block.resize(blk_size); // Zeroing of memory is intentional here
1212 6213 : filein.read((char*)block.data(), blk_size);
1213 6213 : } catch(const std::exception& e) {
1214 0 : return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
1215 0 : }
1216 :
1217 6213 : return true;
1218 6213 : }
1219 :
1220 6213 : bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
1221 : {
1222 6213 : FlatFilePos block_pos;
1223 : {
1224 6213 : LOCK(cs_main);
1225 6213 : block_pos = pindex->GetBlockPos();
1226 6213 : }
1227 :
1228 12426 : return ReadRawBlockFromDisk(block, block_pos, message_start);
1229 6213 : }
1230 :
1231 102821 : CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1232 : {
1233 102821 : int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1234 : // Force block reward to zero when right shift is undefined.
1235 102821 : if (halvings >= 64)
1236 563 : return 0;
1237 :
1238 : CAmount nSubsidy = 50 * COIN;
1239 : // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1240 102258 : nSubsidy >>= halvings;
1241 : return nSubsidy;
1242 102821 : }
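// Worked example (editor's note, assuming mainnet's nSubsidyHalvingInterval of 210,000):
//   heights        0 .. 209,999  ->  halvings = 0  ->  subsidy = 50   * COIN
//   heights  210,000 .. 419,999  ->  halvings = 1  ->  subsidy = 25   * COIN
//   heights  420,000 .. 629,999  ->  halvings = 2  ->  subsidy = 12.5 * COIN (1,250,000,000 satoshi)
// The integer right shift reaches zero on its own after a few dozen halvings; the explicit
// `halvings >= 64` check exists because shifting a 64-bit value by 64 or more bits would be
// undefined behaviour.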
1243 :
1244 1809 : CoinsViews::CoinsViews(
1245 : std::string ldb_name,
1246 : size_t cache_size_bytes,
1247 : bool in_memory,
1248 603 : bool should_wipe) : m_dbview(
1249 603 : GetDataDir() / ldb_name, cache_size_bytes, in_memory, should_wipe),
1250 1206 : m_catcherview(&m_dbview) {}
1251 :
1252 603 : void CoinsViews::InitCache()
1253 : {
1254 603 : m_cacheview = MakeUnique<CCoinsViewCache>(&m_catcherview);
1255 603 : }
1256 :
1257 1206 : CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, uint256 from_snapshot_blockhash)
1258 603 : : m_blockman(blockman),
1259 603 : m_mempool(mempool),
1260 1809 : m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1261 :
1262 603 : void CChainState::InitCoinsDB(
1263 : size_t cache_size_bytes,
1264 : bool in_memory,
1265 : bool should_wipe,
1266 : std::string leveldb_name)
1267 : {
1268 603 : if (!m_from_snapshot_blockhash.IsNull()) {
1269 2 : leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
1270 2 : }
1271 :
1272 603 : m_coins_views = MakeUnique<CoinsViews>(
1273 : leveldb_name, cache_size_bytes, in_memory, should_wipe);
1274 603 : }
1275 :
1276 603 : void CChainState::InitCoinsCache(size_t cache_size_bytes)
1277 : {
1278 603 : assert(m_coins_views != nullptr);
1279 603 : m_coinstip_cache_size_bytes = cache_size_bytes;
1280 603 : m_coins_views->InitCache();
1281 603 : }
1282 :
1283 : // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1284 : // is a performance-related implementation detail. This function must be marked
1285 : // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
1286 : // can call it.
1287 : //
1288 932699 : bool CChainState::IsInitialBlockDownload() const
1289 : {
1290 : // Optimization: pre-test latch before taking the lock.
1291 932699 : if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1292 877414 : return false;
1293 :
1294 55285 : LOCK(cs_main);
1295 55285 : if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1296 0 : return false;
1297 55285 : if (fImporting || fReindex)
1298 9435 : return true;
1299 45850 : if (m_chain.Tip() == nullptr)
1300 1 : return true;
1301 45849 : if (m_chain.Tip()->nChainWork < nMinimumChainWork)
1302 2535 : return true;
1303 43314 : if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
1304 42909 : return true;
1305 405 : LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1306 405 : m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1307 405 : return false;
1308 932699 : }
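// Editor's sketch (illustrative only, hypothetical names; not part of validation.cpp):
// IsInitialBlockDownload() above uses a double-checked latch -- probe the relaxed atomic
// before taking cs_main, re-check it under the lock, and only then run the expensive tip
// checks. The same pattern in isolation looks like this:
//
//   std::atomic<bool> latch{false};
//   std::mutex m;
//   bool StillInInitialState();   // assumed: the slow, lock-protected check
//
//   bool LatchedCheck()
//   {
//       if (latch.load(std::memory_order_relaxed)) return false;  // cheap fast path
//       std::lock_guard<std::mutex> lock(m);
//       if (latch.load(std::memory_order_relaxed)) return false;  // already latched under the lock
//       if (StillInInitialState()) return true;                   // condition still holds
//       latch.store(true, std::memory_order_relaxed);             // latch exactly once
//       return false;
//   }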
1309 :
1310 : static CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
1311 :
1312 2 : static void AlertNotify(const std::string& strMessage)
1313 : {
1314 2 : uiInterface.NotifyAlertChanged();
1315 : #if HAVE_SYSTEM
1316 2 : std::string strCmd = gArgs.GetArg("-alertnotify", "");
1317 2 : if (strCmd.empty()) return;
1318 :
1319 : // Alert text should be plain ascii coming from a trusted source, but to
1320 : // be safe we first strip anything not in safeChars, then add single quotes around
1321 : // the whole string before passing it to the shell:
1322 2 : std::string singleQuote("'");
1323 2 : std::string safeStatus = SanitizeString(strMessage);
1324 2 : safeStatus = singleQuote+safeStatus+singleQuote;
1325 2 : boost::replace_all(strCmd, "%s", safeStatus);
1326 :
1327 2 : std::thread t(runCommand, strCmd);
1328 2 : t.detach(); // thread runs free
1329 : #endif
1330 2 : }
1331 :
1332 44429 : static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1333 : {
1334 44429 : AssertLockHeld(cs_main);
1335 : // Before we get past initial download, we cannot reliably alert about forks
1336 : // (we assume we don't get stuck on a fork before finishing our initial sync)
1337 44429 : if (::ChainstateActive().IsInitialBlockDownload())
1338 : return;
1339 :
1340 : // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
1341 : // of our head, drop it
1342 38260 : if (pindexBestForkTip && ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72)
1343 0 : pindexBestForkTip = nullptr;
1344 :
1345 38260 : if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > ::ChainActive().Tip()->nChainWork + (GetBlockProof(*::ChainActive().Tip()) * 6)))
1346 : {
1347 13 : if (!GetfLargeWorkForkFound() && pindexBestForkBase)
1348 : {
1349 0 : std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
1350 0 : pindexBestForkBase->phashBlock->ToString() + std::string("'");
1351 0 : AlertNotify(warning);
1352 0 : }
1353 13 : if (pindexBestForkTip && pindexBestForkBase)
1354 : {
1355 0 : LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
1356 0 : pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
1357 0 : pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
1358 0 : SetfLargeWorkForkFound(true);
1359 0 : }
1360 : else
1361 : {
1362 13 : LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
1363 13 : SetfLargeWorkInvalidChainFound(true);
1364 : }
1365 : }
1366 : else
1367 : {
1368 38247 : SetfLargeWorkForkFound(false);
1369 38247 : SetfLargeWorkInvalidChainFound(false);
1370 : }
1371 44429 : }
1372 :
1373 176 : static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1374 : {
1375 176 : AssertLockHeld(cs_main);
1376 : // If we are on a fork that is sufficiently large, set a warning flag
1377 : CBlockIndex* pfork = pindexNewForkTip;
1378 349 : CBlockIndex* plonger = ::ChainActive().Tip();
1379 349 : while (pfork && pfork != plonger)
1380 : {
1381 173 : while (plonger && plonger->nHeight > pfork->nHeight)
1382 0 : plonger = plonger->pprev;
1383 173 : if (pfork == plonger)
1384 : break;
1385 173 : pfork = pfork->pprev;
1386 : }
1387 :
1388 :     // We warn the user about a fork of at least 7 blocks whose tip is within 72 blocks
1389 :     // (+/- 12 hours if no one mines it) of ours, or about a chain that is entirely longer
1390 :     // than ours and invalid (note that the latter should be caught by both checks).
1391 :     // The 7-block threshold is somewhat arbitrary; it represents just under 10% of sustained
1392 :     // network hash rate operating on the fork.
1393 :     // Defining the condition this way lets us store only the highest fork tip (+ base) that
1394 :     // meets the 7-block condition, and that is always the fork most likely to cause a warning.
1395 352 : if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
1396 176 : pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
1397 0 : ::ChainActive().Height() - pindexNewForkTip->nHeight < 72)
1398 : {
1399 0 : pindexBestForkTip = pindexNewForkTip;
1400 0 : pindexBestForkBase = pfork;
1401 0 : }
1402 :
1403 176 : CheckForkWarningConditions();
1404 176 : }
1405 :
1406 : // Called both upon regular invalid block discovery *and* InvalidateBlock
1407 413 : void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1408 : {
1409 413 : if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1410 88 : pindexBestInvalid = pindexNew;
1411 413 : if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1412 224 : pindexBestHeader = ::ChainActive().Tip();
1413 224 : }
1414 :
1415 413 : LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
1416 413 : pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1417 413 : log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1418 413 : CBlockIndex *tip = ::ChainActive().Tip();
1419 413 : assert (tip);
1420 413 : LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
1421 413 : tip->GetBlockHash().ToString(), ::ChainActive().Height(), log(tip->nChainWork.getdouble())/log(2.0),
1422 413 : FormatISO8601DateTime(tip->GetBlockTime()));
1423 413 : CheckForkWarningConditions();
1424 413 : }
1425 :
1426 : // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
1427 : // which does its own setBlockIndexCandidates management.
1428 176 : void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) {
1429 176 : if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
1430 176 : pindex->nStatus |= BLOCK_FAILED_VALID;
1431 176 : m_blockman.m_failed_blocks.insert(pindex);
1432 176 : setDirtyBlockIndex.insert(pindex);
1433 176 : setBlockIndexCandidates.erase(pindex);
1434 176 : InvalidChainFound(pindex);
1435 176 : }
1436 176 : }
1437 :
1438 5277731 : void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1439 : {
1440 : // mark inputs spent
1441 5277731 : if (!tx.IsCoinBase()) {
1442 5205812 : txundo.vprevout.reserve(tx.vin.size());
1443 12073701 : for (const CTxIn &txin : tx.vin) {
1444 6867889 : txundo.vprevout.emplace_back();
1445 6867889 : bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1446 6867889 : assert(is_spent);
1447 : }
1448 5205812 : }
1449 : // add outputs
1450 5277731 : AddCoins(inputs, tx, nHeight);
1451 5277731 : }
1452 :
1453 5112895 : void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
1454 : {
1455 5112895 : CTxUndo txundo;
1456 5112895 : UpdateCoins(tx, inputs, txundo, nHeight);
1457 5112895 : }
1458 :
1459 338196 : bool CScriptCheck::operator()() {
1460 338196 : const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1461 338196 : const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1462 338196 : return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
1463 0 : }
1464 :
1465 72379 : int GetSpendHeight(const CCoinsViewCache& inputs)
1466 : {
1467 72379 : LOCK(cs_main);
1468 72379 : CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1469 72379 : return pindexPrev->nHeight + 1;
1470 72379 : }
1471 :
1472 :
1473 640 : static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache;
1474 640 : static CSHA256 g_scriptExecutionCacheHasher;
1475 :
1476 989 : void InitScriptExecutionCache() {
1477 : // Setup the salted hasher
1478 989 : uint256 nonce = GetRandHash();
1479 : // We want the nonce to be 64 bytes long to force the hasher to process
1480 : // this chunk, which makes later hash computations more efficient. We
1481 : // just write our 32-byte entropy twice to fill the 64 bytes.
1482 989 : g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1483 989 : g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1484 : // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
1485 : // setup_bytes creates the minimum possible cache (2 elements).
1486 989 : size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
1487 989 : size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
1488 989 : LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
1489 989 : (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
1490 989 : }
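// Worked example (editor's note, assuming the default -maxsigcachesize of 32, i.e. a
// 32 MiB budget shared equally with the signature cache): nMaxCacheSize becomes
// min(max(0, 32 / 2), MAX_MAX_SIG_CACHE_SIZE) MiB = 16 MiB, and since each entry is a
// 32-byte uint256, setup_bytes() can hold on the order of 16 MiB / 32 B = 524,288
// elements (the cuckoo cache rounds the element count to a power of two).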
1491 :
1492 : /**
1493 : * Check whether all of this transaction's input scripts succeed.
1494 : *
1495 : * This involves ECDSA signature checks so can be computationally intensive. This function should
1496 : * only be called after the cheap sanity checks in CheckTxInputs passed.
1497 : *
1498 : * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
1499 : * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
1500 : * not pushed onto pvChecks/run.
1501 : *
1502 : * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
1503 : * which are matched. This is useful for checking blocks where we will likely never need the cache
1504 : * entry again.
1505 : *
1506 :  * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags; block-checking
1507 : * callers should probably reset it to CONSENSUS in such cases.
1508 : *
1509 : * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
1510 : */
1511 355406 : bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1512 : {
1513 355406 : if (tx.IsCoinBase()) return true;
1514 :
1515 355406 : if (pvChecks) {
1516 188091 : pvChecks->reserve(tx.vin.size());
1517 188091 : }
1518 :
1519 : // First check if script executions have been cached with the same
1520 : // flags. Note that this assumes that the inputs provided are
1521 : // correct (ie that the transaction hash which is in tx's prevouts
1522 : // properly commits to the scriptPubKey in the inputs view of that
1523 : // transaction).
1524 355406 : uint256 hashCacheEntry;
1525 355406 : CSHA256 hasher = g_scriptExecutionCacheHasher;
1526 355406 : hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
1527 355406 : AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
1528 355406 : if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
1529 90722 : return true;
1530 : }
1531 :
1532 264684 : if (!txdata.m_ready) {
1533 51447 : txdata.Init(tx);
1534 51447 : }
1535 :
1536 537209 : for (unsigned int i = 0; i < tx.vin.size(); i++) {
1537 335929 : const COutPoint &prevout = tx.vin[i].prevout;
1538 335929 : const Coin& coin = inputs.AccessCoin(prevout);
1539 335929 : assert(!coin.IsSpent());
1540 :
1541 : // We very carefully only pass in things to CScriptCheck which
1542 : // are clearly committed to by tx' witness hash. This provides
1543 : // a sanity check that our caching is not introducing consensus
1544 : // failures through additional data in, eg, the coins being
1545 : // spent being checked as a part of CScriptCheck.
1546 :
1547 : // Verify signature
1548 335929 : CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
1549 335929 : if (pvChecks) {
1550 120666 : pvChecks->push_back(CScriptCheck());
1551 120666 : check.swap(pvChecks->back());
1552 215263 : } else if (!check()) {
1553 63404 : if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
1554 : // Check whether the failure was caused by a
1555 : // non-mandatory script verification check, such as
1556 : // non-standard DER encodings or non-null dummy
1557 : // arguments; if so, ensure we return NOT_STANDARD
1558 : // instead of CONSENSUS to avoid downstream users
1559 : // splitting the network between upgraded and
1560 : // non-upgraded nodes by banning CONSENSUS-failing
1561 : // data providers.
1562 126804 : CScriptCheck check2(coin.out, tx, i,
1563 63402 : flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
1564 63402 : if (check2())
1565 46978 : return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
1566 63402 : }
1567 : // MANDATORY flag failures correspond to
1568 : // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
1569 : // failures are the most serious case of validation
1570 : // failures, we may need to consider using
1571 : // RECENT_CONSENSUS_CHANGE for any script failure that
1572 : // could be due to non-upgraded nodes which we may want to
1573 : // support, to avoid splitting the network (but this
1574 : // depends on the details of how net_processing handles
1575 : // such errors).
1576 16426 : return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
1577 : }
1578 335929 : }
1579 :
1580 201280 : if (cacheFullScriptStore && !pvChecks) {
1581 : // We executed all of the provided scripts, and were told to
1582 : // cache the result. Do so now.
1583 82129 : g_scriptExecutionCache.insert(hashCacheEntry);
1584 82129 : }
1585 :
1586 201280 : return true;
1587 355406 : }
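// Editor's sketch (illustrative, not part of validation.cpp): the script execution cache
// key computed above is a single SHA256 over the 64-byte salt written in
// InitScriptExecutionCache() followed by the transaction's witness hash and the
// verification flags, i.e. conceptually
//
//   key = SHA256(nonce || nonce || wtxid || flags)   // 32 + 32 + 32 + 4 bytes
//
// so a cache hit is only possible when both the witness data and the active script flags
// match a previously validated transaction exactly.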
1588 :
1589 44902 : static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
1590 : {
1591 : // Open history file to append
1592 44902 : CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
1593 44902 : if (fileout.IsNull())
1594 0 : return error("%s: OpenUndoFile failed", __func__);
1595 :
1596 : // Write index header
1597 44902 : unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1598 44902 : fileout << messageStart << nSize;
1599 :
1600 : // Write undo data
1601 44902 : long fileOutPos = ftell(fileout.Get());
1602 44902 : if (fileOutPos < 0)
1603 0 : return error("%s: ftell failed", __func__);
1604 44902 : pos.nPos = (unsigned int)fileOutPos;
1605 44902 : fileout << blockundo;
1606 :
1607 : // calculate & write checksum
1608 44902 : CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
1609 44902 : hasher << hashBlock;
1610 44902 : hasher << blockundo;
1611 44902 : fileout << hasher.GetHash();
1612 :
1613 : return true;
1614 44902 : }
1615 :
1616 11285 : bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
1617 : {
1618 11285 : FlatFilePos pos = pindex->GetUndoPos();
1619 11285 : if (pos.IsNull()) {
1620 0 : return error("%s: no undo data available", __func__);
1621 : }
1622 :
1623 : // Open history file to read
1624 11285 : CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1625 11285 : if (filein.IsNull())
1626 1 : return error("%s: OpenUndoFile failed", __func__);
1627 :
1628 : // Read block
1629 11284 : uint256 hashChecksum;
1630 11284 : CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
1631 : try {
1632 11284 : verifier << pindex->pprev->GetBlockHash();
1633 11284 : verifier >> blockundo;
1634 11283 : filein >> hashChecksum;
1635 1 : }
1636 : catch (const std::exception& e) {
1637 1 : return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1638 1 : }
1639 :
1640 : // Verify checksum
1641 11283 : if (hashChecksum != verifier.GetHash())
1642 0 : return error("%s: Checksum mismatch", __func__);
1643 :
1644 11283 : return true;
1645 11286 : }
1646 :
1647 : /** Abort with a message */
1648 1 : static bool AbortNode(const std::string& strMessage, bilingual_str user_message = bilingual_str())
1649 : {
1650 1 : SetMiscWarning(Untranslated(strMessage));
1651 1 : LogPrintf("*** %s\n", strMessage);
1652 1 : if (user_message.empty()) {
1653 1 : user_message = _("A fatal internal error occurred, see debug.log for details");
1654 1 : }
1655 1 : AbortError(user_message);
1656 1 : StartShutdown();
1657 1 : return false;
1658 0 : }
1659 :
1660 1 : static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str())
1661 : {
1662 1 : AbortNode(strMessage, userMessage);
1663 1 : return state.Error(strMessage);
1664 0 : }
1665 :
1666 : /**
1667 : * Restore the UTXO in a Coin at a given COutPoint
1668 : * @param undo The Coin to be restored.
1669 : * @param view The coins view to which to apply the changes.
1670 : * @param out The out point that corresponds to the tx input.
1671 : * @return A DisconnectResult as an int
1672 : */
1673 19204 : int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
1674 : {
1675 : bool fClean = true;
1676 :
1677 19204 : if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
1678 :
1679 19204 : if (undo.nHeight == 0) {
1680 : // Missing undo metadata (height and coinbase). Older versions included this
1681 :         // information only in undo records for the last spend of a transaction's
1682 : // outputs. This implies that it must be present for some other output of the same tx.
1683 0 : const Coin& alternate = AccessByTxid(view, out.hash);
1684 0 : if (!alternate.IsSpent()) {
1685 0 : undo.nHeight = alternate.nHeight;
1686 0 : undo.fCoinBase = alternate.fCoinBase;
1687 : } else {
1688 0 : return DISCONNECT_FAILED; // adding output for transaction without known metadata
1689 : }
1690 0 : }
1691 : // If the coin already exists as an unspent coin in the cache, then the
1692 : // possible_overwrite parameter to AddCoin must be set to true. We have
1693 : // already checked whether an unspent coin exists above using HaveCoin, so
1694 : // we don't need to guess. When fClean is false, an unspent coin already
1695 : // existed and it is an overwrite.
1696 19204 : view.AddCoin(out, std::move(undo), !fClean);
1697 :
1698 19204 : return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1699 19204 : }
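// Editor's note (summary of the return values as they are used by DisconnectBlock() below):
//   DISCONNECT_OK      - the coin was restored cleanly
//   DISCONNECT_UNCLEAN - the coin was restored, but an unspent version of the output was
//                        already present in the view and has been overwritten
//   DISCONNECT_FAILED  - missing undo metadata could not be recovered from another output
//                        of the same transaction; the caller must treat the view as
//                        indeterminate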
1700 :
1701 : /** Undo the effects of this block (with given index) on the UTXO set represented by coins.
1702 : * When FAILED is returned, view is left in an indeterminate state. */
1703 5020 : DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
1704 : {
1705 107995 : bool fClean = true;
1706 :
1707 5020 : CBlockUndo blockUndo;
1708 5020 : if (!UndoReadFromDisk(blockUndo, pindex)) {
1709 1 : error("DisconnectBlock(): failure reading undo data");
1710 1 : return DISCONNECT_FAILED;
1711 : }
1712 :
1713 5019 : if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1714 0 : error("DisconnectBlock(): block and undo data inconsistent");
1715 0 : return DISCONNECT_FAILED;
1716 : }
1717 :
1718 : // undo transactions in reverse order
1719 17016 : for (int i = block.vtx.size() - 1; i >= 0; i--) {
1720 11997 : const CTransaction &tx = *(block.vtx[i]);
1721 11997 : uint256 hash = tx.GetHash();
1722 11997 : bool is_coinbase = tx.IsCoinBase();
1723 :
1724 : // Check that all outputs are available and match the outputs in the block itself
1725 : // exactly.
1726 42553 : for (size_t o = 0; o < tx.vout.size(); o++) {
1727 30556 : if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
1728 19471 : COutPoint out(hash, o);
1729 19471 : Coin coin;
1730 19471 : bool is_spent = view.SpendCoin(out, &coin);
1731 19471 : if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
1732 : fClean = false; // transaction output mismatch
1733 0 : }
1734 19471 : }
1735 : }
1736 :
1737 : // restore inputs
1738 11997 : if (i > 0) { // not coinbases
1739 6978 : CTxUndo &txundo = blockUndo.vtxundo[i-1];
1740 6978 : if (txundo.vprevout.size() != tx.vin.size()) {
1741 0 : error("DisconnectBlock(): transaction and undo data inconsistent");
1742 11997 : return DISCONNECT_FAILED;
1743 : }
1744 24432 : for (unsigned int j = tx.vin.size(); j-- > 0;) {
1745 17454 : const COutPoint &out = tx.vin[j].prevout;
1746 17454 : int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
1747 17454 : if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
1748 17454 : fClean = fClean && res != DISCONNECT_UNCLEAN;
1749 17454 : }
1750 : // At this point, all of txundo.vprevout should have been moved out.
1751 6978 : }
1752 11997 : }
1753 :
1754 : // move best block pointer to prevout block
1755 5019 : view.SetBestBlock(pindex->pprev->GetBlockHash());
1756 :
1757 5019 : return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1758 5020 : }
1759 :
1760 1370 : static void FlushUndoFile(int block_file, bool finalize = false)
1761 : {
1762 1370 : FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
1763 1370 : if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
1764 0 : AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
1765 0 : }
1766 1370 : }
1767 :
1768 1370 : static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
1769 : {
1770 1370 : LOCK(cs_LastBlockFile);
1771 1370 : FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
1772 1370 : if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
1773 0 : AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
1774 0 : }
1775 : // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
1776 :     // e.g. during IBD or a sync after a node has been offline
1777 1370 : if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
1778 1370 : }
1779 :
1780 : static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
1781 :
1782 46458 : static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
1783 : {
1784 : // Write undo information to disk
1785 46458 : if (pindex->GetUndoPos().IsNull()) {
1786 44902 : FlatFilePos _pos;
1787 44902 : if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
1788 0 : return error("ConnectBlock(): FindUndoPos failed");
1789 44902 : if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
1790 0 : return AbortNode(state, "Failed to write undo data");
1791 : // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1792 : // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1793 : // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1794 : // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1795 : // the FindBlockPos function
1796 44902 : if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
1797 0 : FlushUndoFile(_pos.nFile, true);
1798 0 : }
1799 :
1800 : // update nUndoPos in block index
1801 44902 : pindex->nUndoPos = _pos.nPos;
1802 44902 : pindex->nStatus |= BLOCK_HAVE_UNDO;
1803 44902 : setDirtyBlockIndex.insert(pindex);
1804 44902 : }
1805 :
1806 46458 : return true;
1807 46458 : }
1808 :
1809 640 : static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
1810 :
1811 1750 : void ThreadScriptCheck(int worker_num) {
1812 1750 : util::ThreadRename(strprintf("scriptch.%i", worker_num));
1813 1759 : scriptcheckqueue.Thread();
1814 1759 : }
1815 :
1816 640 : VersionBitsCache versionbitscache GUARDED_BY(cs_main);
1817 :
1818 3585912 : int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
1819 : {
1820 3585912 : LOCK(cs_main);
1821 : int32_t nVersion = VERSIONBITS_TOP_BITS;
1822 :
1823 7171824 : for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1824 3585912 : ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache);
1825 3585912 : if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
1826 1629840 : nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
1827 1629840 : }
1828 : }
1829 :
1830 : return nVersion;
1831 3585912 : }
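// Worked example (editor's note): VERSIONBITS_TOP_BITS is 0x20000000, and every deployment
// currently STARTED or LOCKED_IN contributes its signalling bit via VersionBitsMask().
// Assuming a single active deployment assigned bit 1 (a hypothetical choice for
// illustration), the computed version would be
//   0x20000000 | (1 << 1) = 0x20000002
// while a block with no active deployments simply carries 0x20000000.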
1832 :
1833 : /**
1834 : * Threshold condition checker that triggers when unknown versionbits are seen on the network.
1835 : */
1836 : class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
1837 : {
1838 : private:
1839 : int bit;
1840 :
1841 : public:
1842 2478108 : explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
1843 :
1844 1239054 : int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
1845 1239054 : int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
1846 1239054 : int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
1847 1239054 : int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1848 :
1849 513360 : bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
1850 : {
1851 1026720 : return pindex->nHeight >= params.MinBIP9WarningHeight &&
1852 513360 : ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
1853 429434 : ((pindex->nVersion >> bit) & 1) != 0 &&
1854 10378 : ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
1855 : }
1856 : };
1857 :
1858 37120 : static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
1859 :
1860 : // 0.13.0 was shipped with a segwit deployment defined for testnet, but not for
1861 : // mainnet. We no longer need to support disabling the segwit deployment
1862 : // except for testing purposes, due to limitations of the functional test
1863 : // environment. See test/functional/p2p-segwit.py.
1864 86488 : static bool IsScriptWitnessEnabled(const Consensus::Params& params)
1865 : {
1866 86488 : return params.SegwitHeight != std::numeric_limits<int>::max();
1867 : }
1868 :
1869 86488 : static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1870 86488 : AssertLockHeld(cs_main);
1871 :
1872 : unsigned int flags = SCRIPT_VERIFY_NONE;
1873 :
1874 : // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
1875 : // retroactively applied to testnet)
1876 : // However, only one historical block violated the P2SH rules (on both
1877 : // mainnet and testnet), so for simplicity, always leave P2SH
1878 : // on except for the one violating block.
1879 86615 : if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
1880 127 : pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
1881 127 : *pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
1882 : {
1883 : flags |= SCRIPT_VERIFY_P2SH;
1884 86488 : }
1885 :
1886 : // Enforce WITNESS rules whenever P2SH is in effect (and the segwit
1887 : // deployment is defined).
1888 86488 : if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) {
1889 86219 : flags |= SCRIPT_VERIFY_WITNESS;
1890 86219 : }
1891 :
1892 : // Start enforcing the DERSIG (BIP66) rule
1893 86488 : if (pindex->nHeight >= consensusparams.BIP66Height) {
1894 3405 : flags |= SCRIPT_VERIFY_DERSIG;
1895 3405 : }
1896 :
1897 : // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
1898 86488 : if (pindex->nHeight >= consensusparams.BIP65Height) {
1899 2804 : flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
1900 2804 : }
1901 :
1902 : // Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
1903 86488 : if (pindex->nHeight >= consensusparams.CSVHeight) {
1904 13741 : flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
1905 13741 : }
1906 :
1907 : // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit)
1908 86488 : if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
1909 83005 : flags |= SCRIPT_VERIFY_NULLDUMMY;
1910 83005 : }
1911 :
1912 86488 : return flags;
1913 : }
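// Worked example (editor's note): on a chain where P2SH applies and the block height is past
// the BIP66, BIP65, CSV and segwit activation heights, the flags returned above are
//   SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_DERSIG |
//   SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY | SCRIPT_VERIFY_CHECKSEQUENCEVERIFY |
//   SCRIPT_VERIFY_NULLDUMMY
// i.e. every height- or hash-activated soft fork listed in this function is enforced.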
1914 :
1915 :
1916 :
1917 : static int64_t nTimeCheck = 0;
1918 : static int64_t nTimeForks = 0;
1919 : static int64_t nTimeVerify = 0;
1920 : static int64_t nTimeConnect = 0;
1921 : static int64_t nTimeIndex = 0;
1922 : static int64_t nTimeCallbacks = 0;
1923 : static int64_t nTimeTotal = 0;
1924 : static int64_t nBlocksTotal = 0;
1925 :
1926 : /** Apply the effects of this block (with given index) on the UTXO set represented by coins.
1927 : * Validity checks that depend on the UTXO set are also done; ConnectBlock()
1928 : * can fail if those validity checks fail (among other reasons). */
1929 67945 : bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
1930 : CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
1931 : {
1932 67945 : AssertLockHeld(cs_main);
1933 67945 : assert(pindex);
1934 67945 : assert(*pindex->phashBlock == block.GetHash());
1935 67945 : int64_t nTimeStart = GetTimeMicros();
1936 :
1937 : // Check it again in case a previous version let a bad block in
1938 : // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1939 : // ContextualCheckBlockHeader() here. This means that if we add a new
1940 : // consensus rule that is enforced in one of those two functions, then we
1941 : // may have let in a block that violates the rule prior to updating the
1942 : // software, and we would NOT be enforcing the rule here. Fully solving
1943 : // upgrade from one software version to the next after a consensus rule
1944 : // change is potentially tricky and issue-specific (see RewindBlockIndex()
1945 : // for one general approach that was used for BIP 141 deployment).
1946 : // Also, currently the rule against blocks more than 2 hours in the future
1947 : // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1948 : // re-enforce that rule here (at least until we make it impossible for
1949 : // GetAdjustedTime() to go backward).
1950 67945 : if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
1951 0 : if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
1952 : // We don't write down blocks to disk if they may have been
1953 : // corrupted, so this should be impossible unless we're having hardware
1954 : // problems.
1955 0 : return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
1956 : }
1957 0 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
1958 : }
1959 :
1960 : // verify that the view's current state corresponds to the previous block
1961 67945 : uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
1962 67945 : assert(hashPrevBlock == view.GetBestBlock());
1963 :
1964 67945 : nBlocksTotal++;
1965 :
1966 : // Special case for the genesis block, skipping connection of its transactions
1967 : // (its coinbase is unspendable)
1968 67945 : if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
1969 293 : if (!fJustCheck)
1970 293 : view.SetBestBlock(pindex->GetBlockHash());
1971 293 : return true;
1972 : }
1973 :
1974 : bool fScriptChecks = true;
1975 67652 : if (!hashAssumeValid.IsNull()) {
1976 : // We've been configured with the hash of a block which has been externally verified to have a valid history.
1977 : // A suitable default value is included with the software and updated from time to time. Because validity
1978 : // relative to a piece of software is an objective fact these defaults can be easily reviewed.
1979 : // This setting doesn't force the selection of any particular chain but makes validating some faster by
1980 : // effectively caching the result of part of the verification.
1981 2431 : BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
1982 2431 : if (it != m_blockman.m_block_index.end()) {
1983 2508 : if (it->second->GetAncestor(pindex->nHeight) == pindex &&
1984 204 : pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
1985 204 : pindexBestHeader->nChainWork >= nMinimumChainWork) {
1986 : // This block is a member of the assumed verified chain and an ancestor of the best header.
1987 : // Script verification is skipped when connecting blocks under the
1988 :                 // assumevalid block. Assuming the assumevalid block is valid, this
1989 :                 // is safe because block merkle hashes are still computed and checked.
1990 : // Of course, if an assumed valid block is invalid due to false scriptSigs
1991 : // this optimization would allow an invalid chain to be accepted.
1992 : // The equivalent time check discourages hash power from extorting the network via DOS attack
1993 : // into accepting an invalid block through telling users they must manually set assumevalid.
1994 : // Requiring a software change or burying the invalid block, regardless of the setting, makes
1995 : // it hard to hide the implication of the demand. This also avoids having release candidates
1996 : // that are hardly doing any signature verification at all in testing without having to
1997 : // artificially set the default assumed verified block further back.
1998 : // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
1999 : // least as good as the expected chain.
2000 204 : fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
2001 204 : }
2002 : }
2003 2431 : }
2004 :
2005 67652 : int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
2006 67652 : LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
2007 :
2008 : // Do not allow blocks that contain transactions which 'overwrite' older transactions,
2009 : // unless those are already completely spent.
2010 : // If such overwrites are allowed, coinbases and transactions depending upon those
2011 : // can be duplicated to remove the ability to spend the first instance -- even after
2012 : // being sent to another address.
2013 : // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
2014 : // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
2015 : // already refuses previously-known transaction ids entirely.
2016 : // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
2017 : // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
2018 : // two in the chain that violate it. This prevents exploiting the issue against nodes during their
2019 : // initial block download.
2020 135304 : bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
2021 67652 : (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
2022 :
2023 : // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
2024 : // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
2025 : // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
2026 : // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
2027 : // duplicate transactions descending from the known pairs either.
2028 : // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
2029 :
2030 : // BIP34 requires that a block at height X (block X) has its coinbase
2031 : // scriptSig start with a CScriptNum of X (indicated height X). The above
2032 : // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
2033 : // case that there is a block X before the BIP34 height of 227,931 which has
2034 : // an indicated height Y where Y is greater than X. The coinbase for block
2035 : // X would also be a valid coinbase for block Y, which could be a BIP30
2036 : // violation. An exhaustive search of all mainnet coinbases before the
2037 : // BIP34 height which have an indicated height greater than the block height
2038 : // reveals many occurrences. The 3 lowest indicated heights found are
2039 : // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
2040 : // heights would be the first opportunity for BIP30 to be violated.
2041 :
2042 : // The search reveals a great many blocks which have an indicated height
2043 : // greater than 1,983,702, so we simply remove the optimization to skip
2044 : // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
2045 : // that block in another 25 years or so, we should take advantage of a
2046 : // future consensus change to do a new and improved version of BIP34 that
2047 : // will actually prevent ever creating any duplicate coinbases in the
2048 : // future.
2049 : static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
2050 :
2051 : // There is no potential to create a duplicate coinbase at block 209,921
2052 : // because this is still before the BIP34 height and so explicit BIP30
2053 : // checking is still active.
2054 :
2055 : // The final case is block 176,684 which has an indicated height of
2056 : // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
2057 : // before block 490,897 so there was not much opportunity to address this
2058 : // case other than to carefully analyze it and determine it would not be a
2059 : // problem. Block 490,897 was, in fact, mined with a different coinbase than
2060 : // block 176,684, but it is important to note that even if it hadn't been or
2061 : // is remined on an alternate fork with a duplicate coinbase, we would still
2062 : // not run into a BIP30 violation. This is because the coinbase for 176,684
2063 : // is spent in block 185,956 in transaction
2064 : // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
2065 : // spending transaction can't be duplicated because it also spends coinbase
2066 : // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
2067 : // coinbase has an indicated height of over 4.2 billion, and wouldn't be
2068 : // duplicatable until that height, and it's currently impossible to create a
2069 : // chain that long. Nevertheless we may wish to consider a future soft fork
2070 : // which retroactively prevents block 490,897 from creating a duplicate
2071 : // coinbase. The two historical BIP30 violations often provide a confusing
2072 : // edge case when manipulating the UTXO and it would be simpler not to have
2073 : // another edge case to deal with.
2074 :
2075 : // testnet3 has no blocks before the BIP34 height with indicated heights
2076 : // post BIP34 before approximately height 486,000,000 and presumably will
2077 : // be reset before it reaches block 1,983,702 and starts doing unnecessary
2078 : // BIP30 checking again.
2079 67652 : assert(pindex->pprev);
2080 67652 : CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
2081 : //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
2082 67652 : fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
2083 :
2084 : // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
2085 : // consensus change that ensures coinbases at those heights can not
2086 : // duplicate earlier coinbases.
2087 67652 : if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
2088 194943 : for (const auto& tx : block.vtx) {
2089 480197 : for (size_t o = 0; o < tx->vout.size(); o++) {
2090 352907 : if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
2091 1 : LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2092 254402 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
2093 : }
2094 : }
2095 127290 : }
2096 : }
2097 :
2098 : // Start enforcing BIP68 (sequence locks)
2099 : int nLockTimeFlags = 0;
2100 67651 : if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) {
2101 : nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
2102 13534 : }
2103 :
2104 : // Get the script flags for this block
2105 67651 : unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
2106 :
2107 67651 : int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
2108 67651 : LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
2109 :
2110 67651 : CBlockUndo blockundo;
2111 :
2112 : // Precomputed transaction data pointers must not be invalidated
2113 : // until after `control` has run the script checks (potentially
2114 : // in multiple threads). Preallocate the vector size so a new allocation
2115 : // doesn't invalidate pointers into the vector, and keep txsdata in scope
2116 : // for as long as `control`.
2117 67651 : CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
2118 67651 : std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
2119 :
2120 67651 : std::vector<int> prevheights;
2121 67651 : CAmount nFees = 0;
2122 : int nInputs = 0;
2123 : int64_t nSigOpsCost = 0;
2124 67651 : blockundo.vtxundo.reserve(block.vtx.size() - 1);
2125 194802 : for (unsigned int i = 0; i < block.vtx.size(); i++)
2126 : {
2127 127287 : const CTransaction &tx = *(block.vtx[i]);
2128 :
2129 127287 : nInputs += tx.vin.size();
2130 :
2131 127287 : if (!tx.IsCoinBase())
2132 : {
2133 59636 : CAmount txfee = 0;
2134 59636 : TxValidationState tx_state;
2135 59636 : if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
2136 : // Any transaction validation failure in ConnectBlock is a block consensus failure
2137 50 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
2138 25 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2139 25 : return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
2140 : }
2141 59611 : nFees += txfee;
2142 59611 : if (!MoneyRange(nFees)) {
2143 0 : LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
2144 0 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
2145 : }
2146 :
2147 : // Check that transaction is BIP68 final
2148 : // BIP68 lock checks (as opposed to nLockTime checks) must
2149 : // be in ConnectBlock because they require the UTXO set
2150 59611 : prevheights.resize(tx.vin.size());
2151 161154 : for (size_t j = 0; j < tx.vin.size(); j++) {
2152 101543 : prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
2153 : }
2154 :
2155 59611 : if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2156 12 : LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
2157 12 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
2158 : }
2159 59636 : }
2160 :
2161 : // GetTransactionSigOpCost counts 3 types of sigops:
2162 : // * legacy (always)
2163 : // * p2sh (when P2SH enabled in flags and excludes coinbase)
2164 : // * witness (when witness enabled in flags and excludes coinbase)
2165 127250 : nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
2166 127250 : if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
2167 5 : LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
2168 5 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
2169 : }
2170 :
2171 127245 : if (!tx.IsCoinBase())
2172 : {
2173 59594 : std::vector<CScriptCheck> vChecks;
2174 : bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
2175 59594 : TxValidationState tx_state;
2176 59594 : if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
2177 : // Any transaction validation failure in ConnectBlock is a block consensus failure
2178 188 : state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
2179 94 : tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2180 94 : return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
2181 94 : tx.GetHash().ToString(), state.ToString());
2182 : }
2183 59500 : control.Add(vChecks);
2184 59594 : }
2185 :
2186 127151 : CTxUndo undoDummy;
2187 127151 : if (i > 0) {
2188 59500 : blockundo.vtxundo.push_back(CTxUndo());
2189 59500 : }
2190 127151 : UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
2191 127151 : }
2192 67515 : int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
2193 67515 : LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
2194 :
2195 67515 : CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
2196 67515 : if (block.vtx[0]->GetValueOut() > blockReward) {
2197 5 : LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
2198 5 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
2199 : }
2200 :
2201 67510 : if (!control.Wait()) {
2202 39 : LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
2203 39 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
2204 : }
2205 67471 : int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
2206 67471 : LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
2207 :
2208 67471 : if (fJustCheck)
2209 21013 : return true;
2210 :
2211 46458 : if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
2212 0 : return false;
2213 :
2214 46458 : if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
2215 44902 : pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
2216 44902 : setDirtyBlockIndex.insert(pindex);
2217 44902 : }
2218 :
2219 46458 : assert(pindex->phashBlock);
2220 : // add this block to the view's block chain
2221 46458 : view.SetBestBlock(pindex->GetBlockHash());
2222 :
2223 46458 : int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
2224 46458 : LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
2225 :
2226 46458 : int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
2227 46458 : LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
2228 :
2229 : return true;
2230 135596 : }
2231 :
2232 161503 : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
2233 : {
2234 161503 : return this->GetCoinsCacheSizeState(
2235 : tx_pool,
2236 161503 : m_coinstip_cache_size_bytes,
2237 161503 : gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2238 0 : }
2239 :
2240 161505 : CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
2241 : const CTxMemPool* tx_pool,
2242 : size_t max_coins_cache_size_bytes,
2243 : size_t max_mempool_size_bytes)
2244 : {
2245 161505 : const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
2246 161505 : int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2247 323010 : int64_t nTotalSpace =
2248 161505 : max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2249 :
2250 :     //! No need for a periodic flush if at least this much space is still available.
2251 : static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
2252 : int64_t large_threshold =
2253 161505 : std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2254 :
2255 161505 : if (cacheSize > nTotalSpace) {
2256 1 : LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
2257 1 : return CoinsCacheSizeState::CRITICAL;
2258 161504 : } else if (cacheSize > large_threshold) {
2259 0 : return CoinsCacheSizeState::LARGE;
2260 : }
2261 161504 : return CoinsCacheSizeState::OK;
2262 161505 : }
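// Worked example (editor's note, hypothetical numbers): with a 450 MB coins-tip cache budget
// and a 300 MB mempool limit of which 100 MB is currently in use,
//   nTotalSpace     = 450 MB + max(300 MB - 100 MB, 0) = 650 MB
//   large_threshold = max(9 * 650 MB / 10, 650 MB - 10 MiB) ~= 650 MB - 10 MiB
// so LARGE is reported only once usage comes within roughly 10 MiB of the combined budget,
// and CRITICAL once it exceeds nTotalSpace outright.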
2263 :
2264 161503 : bool CChainState::FlushStateToDisk(
2265 : const CChainParams& chainparams,
2266 : BlockValidationState &state,
2267 : FlushStateMode mode,
2268 : int nManualPruneHeight)
2269 : {
2270 161503 : LOCK(cs_main);
2271 161503 : assert(this->CanFlushToDisk());
2272 : static std::chrono::microseconds nLastWrite{0};
2273 : static std::chrono::microseconds nLastFlush{0};
2274 161503 : std::set<int> setFilesToPrune;
2275 : bool full_flush_completed = false;
2276 :
2277 161503 : const size_t coins_count = CoinsTip().GetCacheSize();
2278 161503 : const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2279 :
2280 : try {
2281 : {
2282 : bool fFlushForPrune = false;
2283 : bool fDoFullFlush = false;
2284 161503 : CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool);
2285 161503 : LOCK(cs_LastBlockFile);
2286 161503 : if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
2287 6 : if (nManualPruneHeight > 0) {
2288 0 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
2289 :
2290 0 : FindFilesToPruneManual(g_chainman, setFilesToPrune, nManualPruneHeight);
2291 0 : } else {
2292 6 : LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
2293 :
2294 6 : FindFilesToPrune(g_chainman, setFilesToPrune, chainparams.PruneAfterHeight());
2295 6 : fCheckForPruning = false;
2296 6 : }
2297 6 : if (!setFilesToPrune.empty()) {
2298 : fFlushForPrune = true;
2299 0 : if (!fHavePruned) {
2300 0 : pblocktree->WriteFlag("prunedblockfiles", true);
2301 0 : fHavePruned = true;
2302 0 : }
2303 : }
2304 : }
2305 161503 : const auto nNow = GetTime<std::chrono::microseconds>();
2306 : // Avoid writing/flushing immediately after startup.
2307 161503 : if (nLastWrite.count() == 0) {
2308 524 : nLastWrite = nNow;
2309 524 : }
2310 161503 : if (nLastFlush.count() == 0) {
2311 524 : nLastFlush = nNow;
2312 524 : }
2313 :             // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of processing a block).
2314 161503 : bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
2315 : // The cache is over the limit, we have to write now.
2316 161503 : bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
2317 : // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
2318 161503 : bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
2319 : // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
2320 161503 : bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
2321 : // Combine all conditions that result in a full cache flush.
2322 161503 : fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
2323 : // Write blocks and block index to disk.
2324 161503 : if (fDoFullFlush || fPeriodicWrite) {
2325 : // Depend on nMinDiskSpace to ensure we can write block index
2326 1360 : if (!CheckDiskSpace(GetBlocksDir())) {
2327 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
2328 : }
2329 : {
2330 1360 : LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
2331 :
2332 : // First make sure all block and undo data is flushed to disk.
2333 1360 : FlushBlockFile();
2334 1360 : }
2335 :
2336 : // Then update all block file information (which may refer to block and undo files).
2337 : {
2338 1360 : LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
2339 :
2340 1360 : std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
2341 1360 : vFiles.reserve(setDirtyFileInfo.size());
2342 1742 : for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
2343 382 : vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
2344 382 : setDirtyFileInfo.erase(it++);
2345 : }
2346 1360 : std::vector<const CBlockIndex*> vBlocks;
2347 1360 : vBlocks.reserve(setDirtyBlockIndex.size());
2348 45910 : for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
2349 44550 : vBlocks.push_back(*it);
2350 44550 : setDirtyBlockIndex.erase(it++);
2351 : }
2352 1360 : if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
2353 0 : return AbortNode(state, "Failed to write to block index database");
2354 : }
2355 1360 : }
2356 : // Finally remove any pruned files
2357 1360 : if (fFlushForPrune) {
2358 0 : LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);
2359 :
2360 0 : UnlinkPrunedFiles(setFilesToPrune);
2361 0 : }
2362 1360 : nLastWrite = nNow;
2363 1360 : }
2364 : // Flush best chain related state. This can only be done if the blocks / block index write was also done.
2365 161503 : if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2366 1350 : LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
2367 : coins_count, coins_mem_usage / 1000));
2368 :
2369 : // Typical Coin structures on disk are around 48 bytes in size.
2370 : // Pushing a new one to the database can cause it to be written
2371 : // twice (once in the log, and once in the tables). This is already
2372 : // an overestimation, as most will delete an existing entry or
2373 : // overwrite one. Still, use a conservative safety factor of 2.
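// For a rough sense of scale (illustrative figures only): with 100,000 cached
// coins the check below asks for about 48 * 2 * 2 * 100,000 = 19.2 MB of free
// space in the data directory before the flush proceeds.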
2374 1350 : if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2375 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
2376 : }
2377 : // Flush the chainstate (which may refer to block index entries).
2378 1350 : if (!CoinsTip().Flush())
2379 0 : return AbortNode(state, "Failed to write to coin database");
2380 1350 : nLastFlush = nNow;
2381 : full_flush_completed = true;
2382 1350 : }
2383 161503 : }
2384 161503 : if (full_flush_completed) {
2385 : // Update best block in wallet (so we can detect restored wallets).
2386 1350 : GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
2387 1350 : }
2388 0 : } catch (const std::runtime_error& e) {
2389 0 : return AbortNode(state, std::string("System error while flushing: ") + e.what());
2390 0 : }
2391 161503 : return true;
2392 161503 : }
2393 :
2394 1039 : void CChainState::ForceFlushStateToDisk() {
2395 1039 : BlockValidationState state;
2396 1039 : const CChainParams& chainparams = Params();
2397 1039 : if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
2398 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2399 0 : }
2400 1039 : }
2401 :
2402 5 : void CChainState::PruneAndFlush() {
2403 5 : BlockValidationState state;
2404 5 : fCheckForPruning = true;
2405 5 : const CChainParams& chainparams = Params();
2406 :
2407 5 : if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
2408 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2409 0 : }
2410 5 : }
2411 :
2412 4 : static void DoWarning(const bilingual_str& warning)
2413 : {
2414 : static bool fWarned = false;
2415 4 : SetMiscWarning(warning);
2416 4 : if (!fWarned) {
2417 2 : AlertNotify(warning.original);
2418 2 : fWarned = true;
2419 2 : }
2420 4 : }
2421 :
2422 : /** Private helper function that concatenates warning messages. */
2423 1442 : static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2424 : {
2425 1442 : if (!res.empty()) res += Untranslated(", ");
2426 1442 : res += warn;
2427 1442 : }
2428 :
2429 : /** Check warning conditions and do some notifications on new chain tip set. */
2430 49828 : static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const CChainParams& chainParams)
2431 : EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
2432 : {
2433 : // New best block
2434 49828 : mempool.AddTransactionsUpdated(1);
2435 :
2436 : {
2437 49828 : LOCK(g_best_block_mutex);
2438 49828 : g_best_block = pindexNew->GetBlockHash();
2439 49828 : g_best_block_cv.notify_all();
2440 49828 : }
2441 :
2442 49828 : bilingual_str warning_messages;
2443 49828 : if (!::ChainstateActive().IsInitialBlockDownload())
2444 : {
2445 42726 : int nUpgraded = 0;
2446 : const CBlockIndex* pindex = pindexNew;
2447 1281780 : for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
2448 1239054 : WarningBitsConditionChecker checker(bit);
2449 1239054 : ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
2450 1239054 : if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
2451 148 : const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
2452 148 : if (state == ThresholdState::ACTIVE) {
2453 4 : DoWarning(warning);
2454 : } else {
2455 144 : AppendWarning(warning_messages, warning);
2456 : }
2457 148 : }
2458 1239054 : }
2459 : // Check the version of the last 100 blocks to see if we need to upgrade:
2460 3585146 : for (int i = 0; i < 100 && pindex != nullptr; i++)
2461 : {
2462 3542420 : int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
2463 3542420 : if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
2464 25675 : ++nUpgraded;
2465 3542420 : pindex = pindex->pprev;
2466 : }
2467 42726 : if (nUpgraded > 0)
2468 1298 : AppendWarning(warning_messages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded));
2469 42726 : }
2470 49828 : LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
2471 49828 : pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
2472 49828 : log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
2473 49828 : FormatISO8601DateTime(pindexNew->GetBlockTime()),
2474 49828 : GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize(),
2475 49828 : !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");
2476 49828 : }
2477 :
2478 : /** Disconnect m_chain's tip.
2479 : * After calling, the mempool will be in an inconsistent state, with
2480 : * transactions from disconnected blocks being added to disconnectpool. You
2481 : * should make the mempool consistent again by calling UpdateMempoolForReorg
2482 : * with cs_main held.
2483 : *
2484 : * If disconnectpool is nullptr, then no disconnected transactions are added to
2485 : * disconnectpool (note that the caller is responsible for mempool consistency
2486 : * in any case).
2487 : */
2488 3285 : bool CChainState::DisconnectTip(BlockValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool)
2489 : {
2490 3285 : AssertLockHeld(cs_main);
2491 3285 : AssertLockHeld(m_mempool.cs);
2492 :
2493 3285 : CBlockIndex *pindexDelete = m_chain.Tip();
2494 3285 : assert(pindexDelete);
2495 : // Read block from disk.
2496 3285 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2497 3285 : CBlock& block = *pblock;
2498 3285 : if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
2499 0 : return error("DisconnectTip(): Failed to read block");
2500 : // Apply the block atomically to the chain state.
2501 3285 : int64_t nStart = GetTimeMicros();
2502 : {
2503 3285 : CCoinsViewCache view(&CoinsTip());
2504 3285 : assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2505 3285 : if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
2506 1 : return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
2507 3284 : bool flushed = view.Flush();
2508 3284 : assert(flushed);
2509 3285 : }
2510 3284 : LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
2511 : // Write the chain state to disk, if necessary.
2512 3284 : if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
2513 0 : return false;
2514 :
2515 3284 : if (disconnectpool) {
2516 : // Save transactions to re-add to mempool at end of reorg
2517 12438 : for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
2518 9301 : disconnectpool->addTransaction(*it);
2519 : }
2520 6342 : while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
2521 : // Drop the earliest entry, and remove its children from the mempool.
2522 3205 : auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
2523 3205 : m_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
2524 3205 : disconnectpool->removeEntry(it);
2525 3205 : }
2526 : }
2527 :
2528 3284 : m_chain.SetTip(pindexDelete->pprev);
2529 :
2530 3284 : UpdateTip(m_mempool, pindexDelete->pprev, chainparams);
2531 : // Let wallets know transactions went from 1-confirmed to
2532 : // 0-confirmed or conflicted:
2533 3284 : GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2534 3284 : return true;
2535 3285 : }
2536 :
2537 : static int64_t nTimeReadFromDisk = 0;
2538 : static int64_t nTimeConnectTotal = 0;
2539 : static int64_t nTimeFlush = 0;
2540 : static int64_t nTimeChainState = 0;
2541 : static int64_t nTimePostConnect = 0;
2542 :
2543 404034 : struct PerBlockConnectTrace {
2544 104911 : CBlockIndex* pindex = nullptr;
2545 : std::shared_ptr<const CBlock> pblock;
2546 209822 : PerBlockConnectTrace() {}
2547 : };
2548 : /**
2549 : * Used to track blocks whose transactions were applied to the UTXO state as a
2550 : * part of a single ActivateBestChainStep call.
2551 : *
2552 : * This class is single-use; once you call GetBlocksConnected() you have to throw
2553 : * it away and make a new one.
2554 : */
2555 116734 : class ConnectTrace {
2556 : private:
2557 : std::vector<PerBlockConnectTrace> blocksConnected;
2558 :
2559 : public:
2560 116734 : explicit ConnectTrace() : blocksConnected(1) {}
2561 :
2562 46544 : void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
2563 46544 : assert(!blocksConnected.back().pindex);
2564 46544 : assert(pindex);
2565 46544 : assert(pblock);
2566 46544 : blocksConnected.back().pindex = pindex;
2567 46544 : blocksConnected.back().pblock = std::move(pblock);
2568 46544 : blocksConnected.emplace_back();
2569 46544 : }
2570 :
2571 44016 : std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
2572 : // We always keep one extra block at the end of our list because
2573 : // blocks are added after all the conflicted transactions have
2574 : // been filled in. Thus, the last entry should always be an empty
2575 : // one waiting for the transactions from the next block. We pop
2576 : // the last entry here to make sure the list we return is sane.
2577 44016 : assert(!blocksConnected.back().pindex);
2578 44016 : blocksConnected.pop_back();
2579 44016 : return blocksConnected;
2580 : }
2581 : };
2582 :
2583 : /**
2584 : * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
2585 : * corresponding to pindexNew, to bypass loading it again from disk.
2586 : *
2587 : * The block is added to connectTrace if connection succeeds.
2588 : */
2589 46720 : bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
2590 : {
2591 46720 : AssertLockHeld(cs_main);
2592 46720 : AssertLockHeld(m_mempool.cs);
2593 :
2594 46720 : assert(pindexNew->pprev == m_chain.Tip());
2595 : // Read block from disk.
2596 46720 : int64_t nTime1 = GetTimeMicros();
2597 46720 : std::shared_ptr<const CBlock> pthisBlock;
2598 46720 : if (!pblock) {
2599 5238 : std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2600 5238 : if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
2601 0 : return AbortNode(state, "Failed to read block");
2602 5238 : pthisBlock = pblockNew;
2603 5238 : } else {
2604 41482 : pthisBlock = pblock;
2605 : }
2606 46720 : const CBlock& blockConnecting = *pthisBlock;
2607 : // Apply the block atomically to the chain state.
2608 46720 : int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
2609 : int64_t nTime3;
2610 46720 : LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2611 : {
2612 46720 : CCoinsViewCache view(&CoinsTip());
2613 46720 : bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
2614 46720 : GetMainSignals().BlockChecked(blockConnecting, state);
2615 46720 : if (!rv) {
2616 176 : if (state.IsInvalid())
2617 176 : InvalidBlockFound(pindexNew, state);
2618 176 : return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
2619 : }
2620 46544 : nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
2621 46544 : assert(nBlocksTotal > 0);
2622 46544 : LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
2623 46544 : bool flushed = view.Flush();
2624 46544 : assert(flushed);
2625 46720 : }
2626 46544 : int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
2627 46544 : LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
2628 : // Write the chain state to disk, if necessary.
2629 46544 : if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
2630 0 : return false;
2631 46544 : int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
2632 46544 : LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
2633 : // Remove conflicting transactions from the mempool.
2634 46544 : m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2635 46544 : disconnectpool.removeForBlock(blockConnecting.vtx);
2636 : // Update m_chain & related variables.
2637 46544 : m_chain.SetTip(pindexNew);
2638 46544 : UpdateTip(m_mempool, pindexNew, chainparams);
2639 :
2640 46544 : int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
2641 46544 : LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
2642 46544 : LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
2643 :
2644 46544 : connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2645 : return true;
2646 46720 : }
2647 :
2648 : /**
2649 : * Return the tip of the chain with the most work in it, that isn't
2650 : * known to be invalid (though it is far from certain to be valid).
2651 : */
2652 56238 : CBlockIndex* CChainState::FindMostWorkChain() {
2653 56238 : do {
2654 : CBlockIndex *pindexNew = nullptr;
2655 :
2656 : // Find the best candidate header.
2657 : {
2658 56242 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
2659 56242 : if (it == setBlockIndexCandidates.rend())
2660 56242 : return nullptr;
2661 56242 : pindexNew = *it;
2662 56242 : }
2663 :
2664 : // Check whether all blocks on the path between the currently active chain and the candidate are valid.
2665 : // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
2666 56242 : CBlockIndex *pindexTest = pindexNew;
2667 : bool fInvalidAncestor = false;
2668 102973 : while (pindexTest && !m_chain.Contains(pindexTest)) {
2669 46735 : assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2670 :
2671 : // Pruned nodes may have entries in setBlockIndexCandidates for
2672 : // which block files have been deleted. Remove those as candidates
2673 : // for the most work chain if we come across them; we can't switch
2674 : // to a chain unless we have all the non-active-chain parent blocks.
2675 46735 : bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
2676 46735 : bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
2677 46735 : if (fFailedChain || fMissingData) {
2678 : // Candidate chain is not usable (either invalid or missing data)
2679 4 : if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
2680 0 : pindexBestInvalid = pindexNew;
2681 4 : CBlockIndex *pindexFailed = pindexNew;
2682 : // Remove the entire chain from the set.
2683 8 : while (pindexTest != pindexFailed) {
2684 4 : if (fFailedChain) {
2685 4 : pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
2686 4 : } else if (fMissingData) {
2687 : // If we're missing data, then add back to m_blocks_unlinked,
2688 : // so that if the block arrives in the future we can try adding
2689 : // to setBlockIndexCandidates again.
2690 0 : m_blockman.m_blocks_unlinked.insert(
2691 0 : std::make_pair(pindexFailed->pprev, pindexFailed));
2692 0 : }
2693 4 : setBlockIndexCandidates.erase(pindexFailed);
2694 4 : pindexFailed = pindexFailed->pprev;
2695 : }
2696 4 : setBlockIndexCandidates.erase(pindexTest);
2697 : fInvalidAncestor = true;
2698 : break;
2699 4 : }
2700 46731 : pindexTest = pindexTest->pprev;
2701 46731 : }
2702 56242 : if (!fInvalidAncestor)
2703 56238 : return pindexNew;
2704 56242 : } while(true);
2705 56238 : }
2706 :
2707 : /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
2708 47162 : void CChainState::PruneBlockIndexCandidates() {
2709 : // Note that we can't delete the current block itself, as we may need to return to it later in case a
2710 : // reorganization to a better block fails.
2711 47162 : std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
2712 140200 : while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2713 93038 : setBlockIndexCandidates.erase(it++);
2714 : }
2715 : // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2716 47162 : assert(!setBlockIndexCandidates.empty());
2717 47162 : }
2718 :
2719 : /**
2720 : * Try to make some progress towards making pindexMostWork the active block.
2721 : * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
2722 : *
2723 : * @returns true unless a system error occurred
2724 : */
2725 44017 : bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
2726 : {
2727 44017 : AssertLockHeld(cs_main);
2728 44017 : AssertLockHeld(m_mempool.cs);
2729 :
2730 44017 : const CBlockIndex *pindexOldTip = m_chain.Tip();
2731 44017 : const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
2732 :
2733 : // Disconnect active blocks which are no longer in the best chain.
2734 : bool fBlocksDisconnected = false;
2735 44017 : DisconnectedBlockTransactions disconnectpool;
2736 46736 : while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2737 2720 : if (!DisconnectTip(state, chainparams, &disconnectpool)) {
2738 : // This is likely a fatal error, but keep the mempool consistent,
2739 : // just in case. Only remove from the mempool in this case.
2740 1 : UpdateMempoolForReorg(m_mempool, disconnectpool, false);
2741 :
2742 : // If we're unable to disconnect a block during normal operation,
2743 : // then that is a failure of our local system -- we should abort
2744 : // rather than stay on a less work chain.
2745 1 : AbortNode(state, "Failed to disconnect block; see debug.log for details");
2746 1 : return false;
2747 : }
2748 : fBlocksDisconnected = true;
2749 : }
2750 :
2751 : // Build list of new blocks to connect.
2752 44016 : std::vector<CBlockIndex*> vpindexToConnect;
2753 : bool fContinue = true;
2754 44016 : int nHeight = pindexFork ? pindexFork->nHeight : -1;
2755 88109 : while (fContinue && nHeight != pindexMostWork->nHeight) {
2756 : // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
2757 : // a few blocks along the way.
2758 44093 : int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2759 44093 : vpindexToConnect.clear();
2760 44093 : vpindexToConnect.reserve(nTargetHeight - nHeight);
2761 44093 : CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2762 146678 : while (pindexIter && pindexIter->nHeight != nHeight) {
2763 102585 : vpindexToConnect.push_back(pindexIter);
2764 102585 : pindexIter = pindexIter->pprev;
2765 : }
2766 : nHeight = nTargetHeight;
2767 :
2768 : // Connect new blocks.
2769 90813 : for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
2770 46720 : if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
2771 176 : if (state.IsInvalid()) {
2772 : // The block violates a consensus rule.
2773 176 : if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
2774 176 : InvalidChainFound(vpindexToConnect.front());
2775 : }
2776 176 : state = BlockValidationState();
2777 176 : fInvalidFound = true;
2778 : fContinue = false;
2779 176 : break;
2780 : } else {
2781 : // A system error occurred (disk space, database error, ...).
2782 : // Make the mempool consistent with the current tip, just in case
2783 : // any observers try to use it before shutdown.
2784 0 : UpdateMempoolForReorg(m_mempool, disconnectpool, false);
2785 0 : return false;
2786 : }
2787 : } else {
2788 46544 : PruneBlockIndexCandidates();
2789 46544 : if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2790 : // We're in a better position than we were. Return temporarily to release the lock.
2791 : fContinue = false;
2792 43828 : break;
2793 : }
2794 : }
2795 2716 : }
2796 44093 : }
2797 :
2798 44016 : if (fBlocksDisconnected) {
2799 : // If any blocks were disconnected, disconnectpool may be non-empty. Add
2800 : // any disconnected transactions back to the mempool.
2801 69 : UpdateMempoolForReorg(m_mempool, disconnectpool, true);
2802 : }
2803 44016 : m_mempool.check(&CoinsTip());
2804 :
2805 : // Callbacks/notifications for a new best chain.
2806 44016 : if (fInvalidFound)
2807 176 : CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
2808 : else
2809 43840 : CheckForkWarningConditions();
2810 :
2811 44016 : return true;
2812 44017 : }
2813 :
2814 79737 : static SynchronizationState GetSynchronizationState(bool init)
2815 : {
2816 79737 : if (!init) return SynchronizationState::POST_INIT;
2817 9672 : if (::fReindex) return SynchronizationState::INIT_REINDEX;
2818 8327 : return SynchronizationState::INIT_DOWNLOAD;
2819 79737 : }
2820 :
2821 74615 : static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
2822 : bool fNotify = false;
2823 : bool fInitialBlockDownload = false;
2824 : static CBlockIndex* pindexHeaderOld = nullptr;
2825 : CBlockIndex* pindexHeader = nullptr;
2826 : {
2827 74615 : LOCK(cs_main);
2828 74615 : pindexHeader = pindexBestHeader;
2829 :
2830 74615 : if (pindexHeader != pindexHeaderOld) {
2831 : fNotify = true;
2832 35843 : fInitialBlockDownload = ::ChainstateActive().IsInitialBlockDownload();
2833 35843 : pindexHeaderOld = pindexHeader;
2834 35843 : }
2835 74615 : }
2836 : // Send block tip changed notifications without cs_main
2837 74615 : if (fNotify) {
2838 35843 : uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2839 35843 : }
2840 74615 : return fNotify;
2841 0 : }
2842 :
2843 58987 : static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
2844 58987 : AssertLockNotHeld(cs_main);
2845 :
2846 58987 : if (GetMainSignals().CallbacksPending() > 10) {
2847 408 : SyncWithValidationInterfaceQueue();
2848 408 : }
2849 58987 : }
2850 :
2851 56061 : bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
2852 : // Note that while we're often called here from ProcessNewBlock, this is
2853 : // far from a guarantee. Code in the P2P/RPC layers will often end up calling
2854 : // us in the middle of ProcessNewBlock - do not assume pblock is set
2855 : // sanely for performance or correctness!
2856 56061 : AssertLockNotHeld(cs_main);
2857 :
2858 : // ABC maintains a fair degree of expensive-to-calculate internal state
2859 : // because this function periodically releases cs_main so that it does not lock up other threads for too long
2860 : // during large connects - and to allow for e.g. the callback queue to drain.
2861 : // We use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time.
2862 56061 : LOCK(m_cs_chainstate);
2863 :
2864 : CBlockIndex *pindexMostWork = nullptr;
2865 175095 : CBlockIndex *pindexNewTip = nullptr;
2866 56061 : int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
2867 56062 : do {
2868 : // Block until the validation queue drains. This should largely
2869 : // never happen in normal operation, but may happen during
2870 : // reindex, causing memory blowup if we run too far ahead.
2871 : // Note that if a validationinterface callback ends up calling
2872 : // ActivateBestChain this may lead to a deadlock! We should
2873 : // probably have a DEBUG_LOCKORDER test for this in the future.
2874 58361 : LimitValidationInterfaceQueue();
2875 :
2876 : {
2877 58361 : LOCK(cs_main);
2878 58361 : LOCK(m_mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
2879 58361 : CBlockIndex* starting_tip = m_chain.Tip();
2880 116734 : bool blocks_connected = false;
2881 58361 : do {
2882 : // We absolutely may not unlock cs_main until we've made forward progress
2883 : // (with the exception of shutdown due to hardware issues, low disk space, etc).
2884 58367 : ConnectTrace connectTrace; // Destructed before cs_main is unlocked
2885 :
2886 58367 : if (pindexMostWork == nullptr) {
2887 114605 : pindexMostWork = FindMostWorkChain();
2888 56238 : }
2889 :
2890 : // Whether we have anything to do at all.
2891 58367 : if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
2892 14350 : break;
2893 : }
2894 :
2895 44017 : bool fInvalidFound = false;
2896 44017 : std::shared_ptr<const CBlock> nullBlockPtr;
2897 44017 : if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
2898 : // A system error occurred
2899 1 : return false;
2900 : }
2901 : blocks_connected = true;
2902 :
2903 44016 : if (fInvalidFound) {
2904 : // Wipe cache, we may need another branch now.
2905 : pindexMostWork = nullptr;
2906 176 : }
2907 44016 : pindexNewTip = m_chain.Tip();
2908 :
2909 90560 : for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
2910 46544 : assert(trace.pblock && trace.pindex);
2911 46544 : GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
2912 : }
2913 58367 : } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
2914 58360 : if (!blocks_connected) return true;
2915 :
2916 44010 : const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
2917 44010 : bool fInitialDownload = IsInitialBlockDownload();
2918 :
2919 : // Notify external listeners about the new tip.
2920 : // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
2921 44010 : if (pindexFork != pindexNewTip) {
2922 : // Notify ValidationInterface subscribers
2923 43834 : GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
2924 :
2925 : // Always notify the UI if a new block tip was connected
2926 43834 : uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
2927 : }
2928 58361 : }
2929 : // When we reach this point, we switched to a new tip (stored in pindexNewTip).
2930 :
2931 44010 : if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
2932 :
2933 : // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
2934 : // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
2935 : // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
2936 : // that the best block hash is non-null.
2937 44010 : if (ShutdownRequested()) break;
2938 43996 : } while (pindexNewTip != pindexMostWork);
2939 41711 : CheckBlockIndex(chainparams.GetConsensus());
2940 :
2941 : // Write changes periodically to disk, after relay.
2942 41711 : if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) {
2943 0 : return false;
2944 : }
2945 :
2946 41711 : return true;
2947 56062 : }
2948 :
2949 173 : bool ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
2950 173 : return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock));
2951 0 : }
2952 :
2953 9 : bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex)
2954 : {
2955 : {
2956 9 : LOCK(cs_main);
2957 9 : if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
2958 : // Nothing to do, this block is not at the tip.
2959 1 : return true;
2960 : }
2961 8 : if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
2962 : // The chain has been extended since the last call, reset the counter.
2963 4 : nBlockReverseSequenceId = -1;
2964 4 : }
2965 8 : nLastPreciousChainwork = m_chain.Tip()->nChainWork;
2966 8 : setBlockIndexCandidates.erase(pindex);
2967 8 : pindex->nSequenceId = nBlockReverseSequenceId;
2968 8 : if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
2969 : // We can't keep reducing the counter if somebody really wants to
2970 : // call preciousblock 2**31-1 times on the same set of tips...
2971 8 : nBlockReverseSequenceId--;
2972 8 : }
2973 8 : if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
2974 8 : setBlockIndexCandidates.insert(pindex);
2975 8 : PruneBlockIndexCandidates();
2976 : }
2977 9 : }
2978 :
2979 8 : return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
2980 9 : }
2981 9 : bool PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
2982 9 : return ::ChainstateActive().PreciousBlock(state, params, pindex);
2983 : }
2984 :
2985 61 : bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
2986 : {
2987 61 : CBlockIndex* to_mark_failed = pindex;
2988 : bool pindex_was_in_chain = false;
2989 : int disconnected = 0;
2990 :
2991 : // We do not allow ActivateBestChain() to run while InvalidateBlock() is
2992 : // running, as that could cause the tip to change while we disconnect
2993 : // blocks.
2994 61 : LOCK(m_cs_chainstate);
2995 :
2996 : // We'll be acquiring and releasing cs_main below, to allow the validation
2997 : // callbacks to run. However, we should keep the block index in a
2998 : // consistent state as we disconnect blocks -- in particular we need to
2999 : // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3000 : // To avoid walking the block index repeatedly in search of candidates,
3001 : // build a map once so that we can look up candidate blocks by chain
3002 : // work as we go.
3003 61 : std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3004 :
3005 : {
3006 61 : LOCK(cs_main);
3007 14040 : for (const auto& entry : m_blockman.m_block_index) {
3008 13979 : CBlockIndex *candidate = entry.second;
3009 : // We don't need to put anything in our active chain into the
3010 : // multimap, because those candidates will be found and considered
3011 : // as we disconnect.
3012 : // Instead, consider only non-active-chain blocks that have at
3013 : // least as much work as where we expect the new tip to end up.
3014 14553 : if (!m_chain.Contains(candidate) &&
3015 574 : !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3016 361 : candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
3017 6 : candidate->HaveTxsDownloaded()) {
3018 6 : candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
3019 6 : }
3020 13979 : }
3021 61 : }
3022 :
3023 : // Disconnect (descendants of) pindex, and mark them invalid.
3024 61 : while (true) {
3025 479 : if (ShutdownRequested()) break;
3026 :
3027 : // Make sure the queue of validation callbacks doesn't grow unboundedly.
3028 479 : LimitValidationInterfaceQueue();
3029 :
3030 479 : LOCK(cs_main);
3031 479 : LOCK(m_mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between
3032 479 : if (!m_chain.Contains(pindex)) break;
3033 : pindex_was_in_chain = true;
3034 418 : CBlockIndex *invalid_walk_tip = m_chain.Tip();
3035 :
3036 : // ActivateBestChain considers blocks already in m_chain
3037 : // unconditionally valid already, so force disconnect away from it.
3038 418 : DisconnectedBlockTransactions disconnectpool;
3039 418 : bool ret = DisconnectTip(state, chainparams, &disconnectpool);
3040 : // DisconnectTip will add transactions to disconnectpool.
3041 : // Adjust the mempool to be consistent with the new tip, adding
3042 : // transactions back to the mempool if disconnecting was successful,
3043 : // and we're not doing a very deep invalidation (in which case
3044 : // keeping the mempool up to date is probably futile anyway).
3045 418 : UpdateMempoolForReorg(m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
3046 418 : if (!ret) return false;
3047 418 : assert(invalid_walk_tip->pprev == m_chain.Tip());
3048 :
3049 : // We immediately mark the disconnected blocks as invalid.
3050 : // This prevents a case where pruned nodes may fail to invalidateblock
3051 : // and be left unable to start as they have no tip candidates (as there
3052 : // are no blocks that meet the "have data and are not invalid per
3053 : // nStatus" criteria for inclusion in setBlockIndexCandidates).
3054 418 : invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
3055 418 : setDirtyBlockIndex.insert(invalid_walk_tip);
3056 418 : setBlockIndexCandidates.erase(invalid_walk_tip);
3057 418 : setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3058 418 : if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
3059 : // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
3060 : // need to be BLOCK_FAILED_CHILD instead.
3061 0 : to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
3062 0 : setDirtyBlockIndex.insert(to_mark_failed);
3063 0 : }
3064 :
3065 : // Add any equal or more work headers to setBlockIndexCandidates
3066 418 : auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
3067 424 : while (candidate_it != candidate_blocks_by_work.end()) {
3068 6 : if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
3069 6 : setBlockIndexCandidates.insert(candidate_it->second);
3070 6 : candidate_it = candidate_blocks_by_work.erase(candidate_it);
3071 6 : } else {
3072 0 : ++candidate_it;
3073 : }
3074 : }
3075 :
3076 : // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
3077 : // iterations, or, if it's the last one, call InvalidChainFound on it.
3078 418 : to_mark_failed = invalid_walk_tip;
3079 479 : }
3080 :
3081 61 : CheckBlockIndex(chainparams.GetConsensus());
3082 :
3083 : {
3084 61 : LOCK(cs_main);
3085 61 : if (m_chain.Contains(to_mark_failed)) {
3086 : // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
3087 0 : return false;
3088 : }
3089 :
3090 : // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
3091 61 : to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
3092 61 : setDirtyBlockIndex.insert(to_mark_failed);
3093 61 : setBlockIndexCandidates.erase(to_mark_failed);
3094 61 : m_blockman.m_failed_blocks.insert(to_mark_failed);
3095 :
3096 : // If any new blocks somehow arrived while we were disconnecting
3097 : // (above), then the pre-calculation of what should go into
3098 : // setBlockIndexCandidates may have missed entries. This would
3099 : // technically be an inconsistency in the block index, but if we clean
3100 : // it up here, this should be an essentially unobservable error.
3101 : // Loop back over all block index entries and add any missing entries
3102 : // to setBlockIndexCandidates.
3103 61 : BlockMap::iterator it = m_blockman.m_block_index.begin();
3104 14040 : while (it != m_blockman.m_block_index.end()) {
3105 13979 : if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
3106 67 : setBlockIndexCandidates.insert(it->second);
3107 67 : }
3108 13979 : it++;
3109 : }
3110 :
3111 61 : InvalidChainFound(to_mark_failed);
3112 61 : }
3113 :
3114 : // Only notify about a new block tip if the active chain was modified.
3115 61 : if (pindex_was_in_chain) {
3116 60 : uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
3117 : }
3118 61 : return true;
3119 61 : }
3120 :
3121 51 : bool InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
3122 51 : return ::ChainstateActive().InvalidateBlock(state, chainparams, pindex);
3123 : }
3124 :
3125 7 : void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
3126 7 : AssertLockHeld(cs_main);
3127 :
3128 7 : int nHeight = pindex->nHeight;
3129 :
3130 : // Remove the invalidity flag from this block and all its descendants.
3131 7 : BlockMap::iterator it = m_blockman.m_block_index.begin();
3132 982 : while (it != m_blockman.m_block_index.end()) {
3133 975 : if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
3134 210 : it->second->nStatus &= ~BLOCK_FAILED_MASK;
3135 210 : setDirtyBlockIndex.insert(it->second);
3136 210 : if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
3137 210 : setBlockIndexCandidates.insert(it->second);
3138 210 : }
3139 210 : if (it->second == pindexBestInvalid) {
3140 : // Reset invalid block marker if it was pointing to one of those.
3141 6 : pindexBestInvalid = nullptr;
3142 6 : }
3143 210 : m_blockman.m_failed_blocks.erase(it->second);
3144 210 : }
3145 975 : it++;
3146 : }
3147 :
3148 : // Remove the invalidity flag from all ancestors too.
3149 764 : while (pindex != nullptr) {
3150 757 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
3151 1 : pindex->nStatus &= ~BLOCK_FAILED_MASK;
3152 1 : setDirtyBlockIndex.insert(pindex);
3153 1 : m_blockman.m_failed_blocks.erase(pindex);
3154 1 : }
3155 757 : pindex = pindex->pprev;
3156 : }
3157 7 : }
3158 :
3159 7 : void ResetBlockFailureFlags(CBlockIndex *pindex) {
3160 7 : return ::ChainstateActive().ResetBlockFailureFlags(pindex);
3161 : }
3162 :
3163 48118 : CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
3164 : {
3165 48118 : AssertLockHeld(cs_main);
3166 :
3167 : // Check for duplicate
3168 48118 : uint256 hash = block.GetHash();
3169 48118 : BlockMap::iterator it = m_block_index.find(hash);
3170 48118 : if (it != m_block_index.end())
3171 3 : return it->second;
3172 :
3173 : // Construct new block index object
3174 48115 : CBlockIndex* pindexNew = new CBlockIndex(block);
3175 : // We assign the sequence id to blocks only when the full data is available,
3176 : // to avoid miners withholding blocks but broadcasting headers, to get a
3177 : // competitive advantage.
3178 48115 : pindexNew->nSequenceId = 0;
3179 48115 : BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
3180 48115 : pindexNew->phashBlock = &((*mi).first);
3181 48115 : BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
3182 48115 : if (miPrev != m_block_index.end())
3183 : {
3184 47824 : pindexNew->pprev = (*miPrev).second;
3185 47824 : pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
3186 47824 : pindexNew->BuildSkip();
3187 47824 : }
3188 48115 : pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
3189 48115 : pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
3190 48115 : pindexNew->RaiseValidity(BLOCK_VALID_TREE);
3191 48115 : if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
3192 46365 : pindexBestHeader = pindexNew;
3193 :
3194 48115 : setDirtyBlockIndex.insert(pindexNew);
3195 :
3196 48115 : return pindexNew;
3197 48118 : }
3198 :
3199 : /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
3200 45425 : void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
3201 : {
3202 45425 : pindexNew->nTx = block.vtx.size();
3203 45425 : pindexNew->nChainTx = 0;
3204 45425 : pindexNew->nFile = pos.nFile;
3205 45425 : pindexNew->nDataPos = pos.nPos;
3206 45425 : pindexNew->nUndoPos = 0;
3207 45425 : pindexNew->nStatus |= BLOCK_HAVE_DATA;
3208 45425 : if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
3209 43015 : pindexNew->nStatus |= BLOCK_OPT_WITNESS;
3210 43015 : }
3211 45425 : pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
3212 45425 : setDirtyBlockIndex.insert(pindexNew);
3213 :
3214 45425 : if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
3215 : // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
3216 44901 : std::deque<CBlockIndex*> queue;
3217 44901 : queue.push_back(pindexNew);
3218 :
3219 : // Recursively process any descendant blocks that now may be eligible to be connected.
3220 90323 : while (!queue.empty()) {
3221 45422 : CBlockIndex *pindex = queue.front();
3222 45422 : queue.pop_front();
3223 45422 : pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
3224 : {
3225 45422 : LOCK(cs_nBlockSequenceId);
3226 45422 : pindex->nSequenceId = nBlockSequenceId++;
3227 45422 : }
3228 45422 : if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3229 43796 : setBlockIndexCandidates.insert(pindex);
3230 43796 : }
3231 45422 : std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3232 45943 : while (range.first != range.second) {
3233 521 : std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
3234 521 : queue.push_back(it->second);
3235 521 : range.first++;
3236 521 : m_blockman.m_blocks_unlinked.erase(it);
3237 521 : }
3238 45422 : }
3239 44901 : } else {
3240 524 : if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
3241 524 : m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
3242 524 : }
3243 : }
3244 45425 : }
3245 :
3246 45425 : static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
3247 : {
3248 45425 : LOCK(cs_LastBlockFile);
3249 :
3250 90850 : unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3251 45425 : if (vinfoBlockFile.size() <= nFile) {
3252 107 : vinfoBlockFile.resize(nFile + 1);
3253 : }
3254 :
3255 45425 : bool finalize_undo = false;
3256 45425 : if (!fKnown) {
3257 44098 : while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
3258 : // when the undo file is keeping up with the block file, we want to flush it explicitly
3259 : // when it is lagging behind (more blocks arrive than are being connected), we let the
3260 : // undo block write case handle it
3261 10 : finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)ChainActive().Tip()->nHeight);
3262 10 : nFile++;
3263 10 : if (vinfoBlockFile.size() <= nFile) {
3264 10 : vinfoBlockFile.resize(nFile + 1);
3265 : }
3266 : }
3267 44088 : pos.nFile = nFile;
3268 44088 : pos.nPos = vinfoBlockFile[nFile].nSize;
3269 44088 : }
3270 :
3271 45425 : if ((int)nFile != nLastBlockFile) {
3272 10 : if (!fKnown) {
3273 10 : LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
3274 10 : }
3275 10 : FlushBlockFile(!fKnown, finalize_undo);
3276 10 : nLastBlockFile = nFile;
3277 10 : }
3278 :
3279 45425 : vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3280 45425 : if (fKnown)
3281 1337 : vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3282 : else
3283 44088 : vinfoBlockFile[nFile].nSize += nAddSize;
3284 :
3285 45425 : if (!fKnown) {
3286 44088 : bool out_of_space;
3287 44088 : size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
3288 44088 : if (out_of_space) {
3289 0 : return AbortNode("Disk space is too low!", _("Disk space is too low!"));
3290 : }
3291 44088 : if (bytes_allocated != 0 && fPruneMode) {
3292 1 : fCheckForPruning = true;
3293 1 : }
3294 44088 : }
3295 :
3296 45425 : setDirtyFileInfo.insert(nFile);
3297 45425 : return true;
3298 45425 : }
3299 :
3300 44902 : static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
3301 : {
3302 44902 : pos.nFile = nFile;
3303 :
3304 44902 : LOCK(cs_LastBlockFile);
3305 :
3306 44902 : pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3307 44902 : vinfoBlockFile[nFile].nUndoSize += nAddSize;
3308 44902 : setDirtyFileInfo.insert(nFile);
3309 :
3310 44902 : bool out_of_space;
3311 44902 : size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3312 44902 : if (out_of_space) {
3313 0 : return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
3314 : }
3315 44902 : if (bytes_allocated != 0 && fPruneMode) {
3316 1 : fCheckForPruning = true;
3317 1 : }
3318 :
3319 44902 : return true;
3320 44902 : }
3321 :
3322 143749 : static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
3323 : {
3324 : // Check proof of work matches claimed amount
3325 143749 : if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
3326 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
3327 :
3328 143748 : return true;
3329 143749 : }
3330 :
3331 201636 : bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
3332 : {
3333 : // These are checks that are independent of context.
3334 :
3335 201636 : if (block.fChecked)
3336 105732 : return true;
3337 :
3338 : // Check that the header is valid (particularly PoW). This is mostly
3339 : // redundant with the call in AcceptBlockHeader.
3340 95904 : if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
3341 1 : return false;
3342 :
3343 : // Check the merkle root.
3344 95903 : if (fCheckMerkleRoot) {
3345 53856 : bool mutated;
3346 53856 : uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3347 53856 : if (block.hashMerkleRoot != hashMerkleRoot2)
3348 11 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
3349 :
3350 : // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3351 : // of transactions in a block without affecting the merkle root of a block,
3352 : // while still invalidating it.
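// For example: because an odd node at any level of the merkle tree is hashed
// with a copy of itself, a block whose transactions are [A, B, C] and one whose
// transactions are [A, B, C, C] yield the same merkle root; the duplicated form
// is what gets flagged as mutated and rejected here.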
3353 53845 : if (mutated)
3354 75 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
3355 53856 : }
3356 :
3357 : // All potential-corruption validation must be done before we do any
3358 : // transaction validation, as otherwise we may mark the header as invalid
3359 : // because we receive the wrong transactions for it.
3360 : // Note that witness malleability is checked in ContextualCheckBlock, so no
3361 : // checks that use witness data may be performed here.
3362 :
3363 : // Size limits
3364 95817 : if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
3365 2 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
3366 :
3367 : // First transaction must be coinbase, the rest must not be
3368 95815 : if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
3369 2 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
3370 184879 : for (unsigned int i = 1; i < block.vtx.size(); i++)
3371 89068 : if (block.vtx[i]->IsCoinBase())
3372 376360 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
3373 :
3374 : // Check transactions
3375 : // Must check for duplicate inputs (see CVE-2018-17144)
3376 280686 : for (const auto& tx : block.vtx) {
3377 184875 : TxValidationState tx_state;
3378 184875 : if (!CheckTransaction(*tx, tx_state)) {
3379 : // CheckBlock() does context-free validation checks. The only
3380 : // possible failures are consensus failures.
3381 139 : assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
3382 278 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
3383 139 : strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
3384 : }
3385 184875 : }
3386 : unsigned int nSigOps = 0;
3387 280198 : for (const auto& tx : block.vtx)
3388 : {
3389 184526 : nSigOps += GetLegacySigOpCount(*tx);
3390 : }
3391 95672 : if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
3392 8 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
3393 :
3394 95664 : if (fCheckPOW && fCheckMerkleRoot)
3395 53627 : block.fChecked = true;
3396 :
3397 95664 : return true;
3398 201636 : }
3399 :
3400 339038 : bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
3401 : {
3402 339038 : int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3403 339038 : return (height >= params.SegwitHeight);
3404 : }
3405 :
3406 113988 : int GetWitnessCommitmentIndex(const CBlock& block)
3407 : {
3408 113988 : int commitpos = -1;
3409 113988 : if (!block.vtx.empty()) {
3410 313960 : for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
3411 199972 : const CTxOut& vout = block.vtx[0]->vout[o];
3412 283297 : if (vout.scriptPubKey.size() >= MINIMUM_WITNESS_COMMITMENT &&
3413 83380 : vout.scriptPubKey[0] == OP_RETURN &&
3414 83325 : vout.scriptPubKey[1] == 0x24 &&
3415 83325 : vout.scriptPubKey[2] == 0xaa &&
3416 83325 : vout.scriptPubKey[3] == 0x21 &&
3417 83325 : vout.scriptPubKey[4] == 0xa9 &&
3418 83325 : vout.scriptPubKey[5] == 0xed) {
3419 83325 : commitpos = o;
3420 83325 : }
3421 : }
3422 113988 : }
3423 113988 : return commitpos;
3424 : }
3425 :
3426 24501 : void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3427 : {
3428 24501 : int commitpos = GetWitnessCommitmentIndex(block);
3429 24501 : static const std::vector<unsigned char> nonce(32, 0x00);
3430 24501 : if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
3431 20950 : CMutableTransaction tx(*block.vtx[0]);
3432 20950 : tx.vin[0].scriptWitness.stack.resize(1);
3433 20950 : tx.vin[0].scriptWitness.stack[0] = nonce;
3434 20950 : block.vtx[0] = MakeTransactionRef(std::move(tx));
3435 20950 : }
3436 24501 : }
3437 :
3438 24298 : std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3439 : {
3440 24298 : std::vector<unsigned char> commitment;
3441 24298 : int commitpos = GetWitnessCommitmentIndex(block);
3442 24298 : std::vector<unsigned char> ret(32, 0x00);
3443 48596 : if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
3444 24298 : if (commitpos == -1) {
3445 24297 : uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
3446 24297 : CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
3447 24297 : CTxOut out;
3448 24297 : out.nValue = 0;
3449 24297 : out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
3450 24297 : out.scriptPubKey[0] = OP_RETURN;
3451 24297 : out.scriptPubKey[1] = 0x24;
3452 24297 : out.scriptPubKey[2] = 0xaa;
3453 24297 : out.scriptPubKey[3] = 0x21;
3454 24297 : out.scriptPubKey[4] = 0xa9;
3455 24297 : out.scriptPubKey[5] = 0xed;
3456 24297 : memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
3457 24297 : commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
3458 24297 : CMutableTransaction tx(*block.vtx[0]);
3459 24297 : tx.vout.push_back(out);
3460 24297 : block.vtx[0] = MakeTransactionRef(std::move(tx));
3461 24297 : }
3462 : }
3463 24298 : UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
3464 : return commitment;
3465 24298 : }
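// The commitment output built above has the following scriptPubKey layout
// (the same offsets that GetWitnessCommitmentIndex() matches on):
//
//   byte 0       OP_RETURN
//   byte 1       0x24 (push of the 36 bytes that follow)
//   bytes 2-5    0xaa 0x21 0xa9 0xed (witness commitment tag)
//   bytes 6-37   SHA256d(witness merkle root || 32-byte witness nonce)
//
// The nonce itself travels in the coinbase input's witness stack;
// UpdateUncommittedBlockStructures() fills it with 32 zero bytes when the
// coinbase does not yet carry a witness.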
3466 :
3467 : //! Returns last CBlockIndex* that is a checkpoint
3468 68732 : static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
3469 : {
3470 68732 : const MapCheckpoints& checkpoints = data.mapCheckpoints;
3471 :
3472 137464 : for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
3473 : {
3474 68732 : const uint256& hash = i.second;
3475 68732 : CBlockIndex* pindex = LookupBlockIndex(hash);
3476 68732 : if (pindex) {
3477 68184 : return pindex;
3478 : }
3479 1096 : }
3480 548 : return nullptr;
3481 68732 : }
3482 :
3483 : /** Context-dependent validity checks.
3484 : * By "context", we mean only the previous block headers, but not the UTXO
3485 : * set; UTXO-related validity checks are done in ConnectBlock().
3486 : * NOTE: This function is not currently invoked by ConnectBlock(), so we
3487 : * should consider upgrade issues if we change which consensus rules are
3488 : * enforced in this function (eg by adding a new consensus rule). See comment
3489 : * in ConnectBlock().
3490 : * Note that -reindex-chainstate skips the validation that happens here!
3491 : */
3492 68865 : static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
3493 : {
3494 68865 : assert(pindexPrev != nullptr);
3495 68865 : const int nHeight = pindexPrev->nHeight + 1;
3496 :
3497 : // Check proof of work
3498 68865 : const Consensus::Params& consensusParams = params.GetConsensus();
3499 68865 : if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
3500 2 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
3501 :
3502 : // Check against checkpoints
3503 68863 : if (fCheckpointsEnabled) {
3504 : // Don't accept any forks from the main chain prior to last checkpoint.
3505 : // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
3506 : // BlockIndex().
3507 68732 : CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
3508 68732 : if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3509 1 : LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
3510 1 : return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
3511 : }
3512 68731 : }
3513 :
3514 : // Check timestamp against prev
3515 68862 : if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
3516 6 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
3517 :
3518 : // Check timestamp
3519 68856 : if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
3520 4 : return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
3521 :
3522 : // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
3523 : // check for version 2, 3 and 4 upgrades
3524 70976 : if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
3525 68851 : (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
3526 68850 : (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
3527 3 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
3528 3 : strprintf("rejected nVersion=0x%08x block", block.nVersion));
3529 :
3530 68849 : return true;
3531 68865 : }
3532 :
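// Editorial sketch (not part of validation.cpp) of the timestamp window enforced by
// ContextualCheckBlockHeader above: a header is acceptable only if its time is
// strictly greater than the median time past of its predecessors (the last 11
// blocks in Bitcoin Core) and no more than MAX_FUTURE_BLOCK_TIME ahead of our
// network-adjusted clock. The helper name is illustrative.
static bool BlockTimeInAcceptableWindow(int64_t block_time, int64_t median_time_past, int64_t adjusted_time)
{
    return block_time > median_time_past &&
           block_time <= adjusted_time + MAX_FUTURE_BLOCK_TIME;
}
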
3533 : /** NOTE: This function is not currently invoked by ConnectBlock(), so we
3534 : * should consider upgrade issues if we change which consensus rules are
3535 : * enforced in this function (eg by adding a new consensus rule). See comment
3536 : * in ConnectBlock().
3537 : * Note that -reindex-chainstate skips the validation that happens here!
3538 : */
3539 66179 : static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
3540 : {
3541 66179 : const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3542 :
3543 : // Start enforcing BIP113 (Median Time Past).
3544 : int nLockTimeFlags = 0;
3545 66179 : if (nHeight >= consensusParams.CSVHeight) {
3546 12746 : assert(pindexPrev != nullptr);
3547 : nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
3548 12746 : }
3549 :
3550 66179 : int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3551 12746 : ? pindexPrev->GetMedianTimePast()
3552 53433 : : block.GetBlockTime();
3553 :
3554 : // Check that all transactions are finalized
3555 189787 : for (const auto& tx : block.vtx) {
3556 123608 : if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
3557 7 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
3558 : }
3559 123601 : }
3560 :
3561 : // Enforce rule that the coinbase starts with serialized block height
3562 66172 : if (nHeight >= consensusParams.BIP34Height)
3563 : {
3564 11197 : CScript expect = CScript() << nHeight;
3565 22393 : if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
3566 11196 : !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
3567 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
3568 : }
3569 11197 : }
3570 :
3571 : // Validation for witness commitments.
3572 : // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
3573 : // coinbase (where 0x0000....0000 is used instead).
3574 : // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
3575 : // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
3576 : // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
3577 : // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
3578 : // multiple, the last one is used.
3579 66171 : bool fHaveWitness = false;
3580 66171 : if (nHeight >= consensusParams.SegwitHeight) {
3581 62833 : int commitpos = GetWitnessCommitmentIndex(block);
3582 62833 : if (commitpos != -1) {
3583 56403 : bool malleated = false;
3584 56403 : uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
3585 : // The malleation check is ignored; as the transaction tree itself
3586 : // already does not permit it, it is impossible to trigger in the
3587 : // witness tree.
3588 56403 : if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
3589 62835 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
3590 : }
3591 56401 : CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
3592 56401 : if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
3593 3 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
3594 : }
3595 : fHaveWitness = true;
3596 56403 : }
3597 62828 : }
3598 :
3599 : // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
3600 66166 : if (!fHaveWitness) {
3601 41645 : for (const auto& tx : block.vtx) {
3602 31877 : if (tx->HasWitness()) {
3603 5 : return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
3604 : }
3605 31872 : }
3606 : }
3607 :
3608 : // After the coinbase witness reserved value and commitment are verified,
3609 : // we can check if the block weight passes (before we've checked the
3610 : // coinbase witness, it would be possible for the weight to be too
3611 : // large by filling up the coinbase witness, which doesn't change
3612 : // the block hash, so we couldn't mark the block as permanently
3613 : // failed).
3614 66161 : if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
3615 1 : return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
3616 : }
3617 :
3618 66160 : return true;
3619 66179 : }
3620 :
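// Editorial sketch (not part of validation.cpp) of the BIP34 rule applied above:
// the coinbase scriptSig must begin with the block height serialized as a minimal
// script push. For example, at height 300000 (0x0493e0) the expected prefix is the
// four bytes 0x03 0xe0 0x93 0x04 (a push of three bytes, height in little-endian
// order). The helper name is illustrative; assumes <script/script.h> and <algorithm>.
static bool CoinbaseCommitsToHeight(const CScript& coinbase_script_sig, int height)
{
    const CScript expect = CScript() << height;
    return coinbase_script_sig.size() >= expect.size() &&
           std::equal(expect.begin(), expect.end(), coinbase_script_sig.begin());
}
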
3621 247137 : bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
3622 : {
3623 247137 : AssertLockHeld(cs_main);
3624 : // Check for duplicate
3625 247137 : uint256 hash = block.GetHash();
3626 247137 : BlockMap::iterator miSelf = m_block_index.find(hash);
3627 : CBlockIndex *pindex = nullptr;
3628 247137 : if (hash != chainparams.GetConsensus().hashGenesisBlock) {
3629 247126 : if (miSelf != m_block_index.end()) {
3630 : // Block header is already known.
3631 199281 : pindex = miSelf->second;
3632 199281 : if (ppindex)
3633 199281 : *ppindex = pindex;
3634 199281 : if (pindex->nStatus & BLOCK_FAILED_MASK) {
3635 21 : LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
3636 21 : return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
3637 : }
3638 199260 : return true;
3639 : }
3640 :
3641 47845 : if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
3642 0 : LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
3643 0 : return false;
3644 : }
3645 :
3646 : // Get prev block index
3647 : CBlockIndex* pindexPrev = nullptr;
3648 47845 : BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
3649 47845 : if (mi == m_block_index.end()) {
3650 3 : LogPrintf("ERROR: %s: prev block not found\n", __func__);
3651 3 : return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
3652 : }
3653 47842 : pindexPrev = (*mi).second;
3654 47842 : if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
3655 4 : LogPrintf("ERROR: %s: prev block invalid\n", __func__);
3656 4 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
3657 : }
3658 47838 : if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
3659 13 : return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
3660 :
3661 : /* Determine if this block descends from any block which has been found
3662 : * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
3663 : * them as failed. For example:
3664 : *
3665 : *                      D3
3666 : *                    /
3667 : *            B2 - C2
3668 : *          /         \
3669 : *    A             D2 - E2 - F2
3670 : *          \
3671 : *            B1 - C1 - D1 - E1
3672 : *
3673 : * In the case that we attempted to reorg from E1 to F2, only to find
3674 : * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
3675 : * but NOT D3 (it was not in any of our candidate sets at the time).
3676 : *
3677 : * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
3678 : * in LoadBlockIndex.
3679 : */
3680 47825 : if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
3681 : // The above does not mean "invalid": it checks if the previous block
3682 : // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
3683 : // optimization, in the common case of adding a new block to the tip,
3684 : // we don't need to iterate over the failed blocks list.
3685 64549 : for (const CBlockIndex* failedit : m_failed_blocks) {
3686 48182 : if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
3687 1 : assert(failedit->nStatus & BLOCK_FAILED_VALID);
3688 1 : CBlockIndex* invalid_walk = pindexPrev;
3689 2 : while (invalid_walk != failedit) {
3690 1 : invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
3691 1 : setDirtyBlockIndex.insert(invalid_walk);
3692 1 : invalid_walk = invalid_walk->pprev;
3693 : }
3694 1 : LogPrintf("ERROR: %s: prev block invalid\n", __func__);
3695 1 : return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
3696 1 : }
3697 48181 : }
3698 : }
3699 47845 : }
3700 47835 : if (pindex == nullptr)
3701 47835 : pindex = AddToBlockIndex(block);
3702 :
3703 47835 : if (ppindex)
3704 47835 : *ppindex = pindex;
3705 :
3706 47835 : return true;
3707 247137 : }
3708 :
3709 : // Exposed wrapper for AcceptBlockHeader
3710 17819 : bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
3711 : {
3712 17819 : AssertLockNotHeld(cs_main);
3713 : {
3714 17819 : LOCK(cs_main);
3715 208101 : for (const CBlockHeader& header : headers) {
3716 190282 : CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
3717 190282 : bool accepted = m_blockman.AcceptBlockHeader(
3718 : header, state, chainparams, &pindex);
3719 190282 : ::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus());
3720 :
3721 190282 : if (!accepted) {
3722 27 : return false;
3723 : }
3724 190255 : if (ppindex) {
3725 190192 : *ppindex = pindex;
3726 190192 : }
3727 190282 : }
3728 17819 : }
3729 17792 : if (NotifyHeaderTip()) {
3730 13149 : if (::ChainstateActive().IsInitialBlockDownload() && ppindex && *ppindex) {
3731 367 : LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
3732 367 : }
3733 : }
3734 17792 : return true;
3735 17819 : }
3736 :
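// Editorial note on the progress estimate logged above: it approximates sync
// progress as height / (height + blocks_remaining), with blocks_remaining taken as
// (adjusted_time - best_header_time) / nPowTargetSpacing. For instance, assuming a
// best header at height 500000 whose timestamp lies 30 days behind the clock and a
// 600-second target spacing, blocks_remaining = 2592000 / 600 = 4320 and the line
// would print roughly 100.0 * 500000 / 504320, i.e. about 99.14%. Illustrative helper:
static double HeaderSyncProgressEstimate(int height, int64_t adjusted_time, int64_t best_header_time, int64_t target_spacing)
{
    const double blocks_remaining = double(adjusted_time - best_header_time) / target_spacing;
    return 100.0 * height / (height + blocks_remaining);
}
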
3737 : /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
3738 45425 : static FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const FlatFilePos* dbp) {
3739 45425 : unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
3740 45425 : FlatFilePos blockPos;
3741 45425 : if (dbp != nullptr)
3742 1337 : blockPos = *dbp;
3743 45425 : if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
3744 0 : error("%s: FindBlockPos failed", __func__);
3745 0 : return FlatFilePos();
3746 : }
3747 45425 : if (dbp == nullptr) {
3748 44088 : if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
3749 0 : AbortNode("Failed to write block");
3750 0 : return FlatFilePos();
3751 : }
3752 : }
3753 45425 : return blockPos;
3754 45425 : }
3755 :
3756 : /** Check a block received from the network or read from disk, store it on disk if it passes, and update the block index. If dbp is non-nullptr, the block data is known to already reside on disk and dbp gives its file position (used during reindex). */
3757 56855 : bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
3758 : {
3759 56855 : const CBlock& block = *pblock;
3760 :
3761 56855 : if (fNewBlock) *fNewBlock = false;
3762 56855 : AssertLockHeld(cs_main);
3763 :
3764 56855 : CBlockIndex *pindexDummy = nullptr;
3765 56855 : CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
3766 :
3767 56855 : bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
3768 56855 : CheckBlockIndex(chainparams.GetConsensus());
3769 :
3770 56855 : if (!accepted_header)
3771 15 : return false;
3772 :
3773 : // Try to process all requested blocks that we don't have, but only
3774 : // process an unrequested block if it's new and has enough work to
3775 : // advance our tip, and isn't too many blocks ahead.
3776 56840 : bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
3777 56840 : bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
3778 : // Blocks that are too out-of-order needlessly limit the effectiveness of
3779 : // pruning, because pruning will not delete block files that contain any
3780 : // blocks which are too close in height to the tip. Apply this test
3781 : // regardless of whether pruning is enabled; it should generally be safe to
3782 : // not process unrequested blocks.
3783 56840 : bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
3784 :
3785 : // TODO: Decouple this function from the block download logic by removing fRequested
3786 : // This requires some new chain data structure to efficiently look up if a
3787 : // block is in a chain leading to a candidate for best tip, despite not
3788 : // being such a candidate itself.
3789 :
3790 : // TODO: deal better with return value and error conditions for duplicate
3791 : // and unrequested blocks.
3792 56840 : if (fAlreadyHave) return true;
3793 46254 : if (!fRequested) { // If we didn't ask for it:
3794 1697 : if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
3795 1697 : if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
3796 605 : if (fTooFarAhead) return true; // Block height is too high
3797 :
3798 : // Protect against DoS attacks from low-work chains.
3799 : // If our tip is behind, a peer could try to send us
3800 : // low-work blocks on a fake chain that we would never
3801 : // request; don't process these.
3802 604 : if (pindex->nChainWork < nMinimumChainWork) return true;
3803 : }
3804 :
3805 45160 : if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
3806 45160 : !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
3807 18 : if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
3808 8 : pindex->nStatus |= BLOCK_FAILED_VALID;
3809 8 : setDirtyBlockIndex.insert(pindex);
3810 8 : }
3811 18 : return error("%s: %s", __func__, state.ToString());
3812 : }
3813 :
3814 : // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
3815 : // (but if it does not build on our best tip, let the SendMessages loop relay it)
3816 45142 : if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
3817 37020 : GetMainSignals().NewPoWValidBlock(pindex, pblock);
3818 :
3819 : // Write block to history file
3820 45142 : if (fNewBlock) *fNewBlock = true;
3821 : try {
3822 45142 : FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
3823 45142 : if (blockPos.IsNull()) {
3824 0 : state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
3825 0 : return false;
3826 : }
3827 45142 : ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
3828 45142 : } catch (const std::runtime_error& e) {
3829 0 : return AbortNode(state, std::string("System error: ") + e.what());
3830 0 : }
3831 :
3832 45142 : FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
3833 :
3834 45142 : CheckBlockIndex(chainparams.GetConsensus());
3835 :
3836 45142 : return true;
3837 56855 : }
3838 :
3839 55639 : bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock)
3840 : {
3841 55639 : AssertLockNotHeld(cs_main);
3842 :
3843 : {
3844 55639 : CBlockIndex *pindex = nullptr;
3845 55639 : if (fNewBlock) *fNewBlock = false;
3846 55639 : BlockValidationState state;
3847 :
3848 : // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause a data race.
3849 : // Therefore, the following critical section must include the CheckBlock() call as well.
3850 55639 : LOCK(cs_main);
3851 :
3852 : // Ensure that CheckBlock() passes before calling AcceptBlock, as
3853 : // belt-and-suspenders.
3854 55639 : bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
3855 55639 : if (ret) {
3856 : // Store to disk
3857 55418 : ret = ::ChainstateActive().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
3858 55418 : }
3859 55639 : if (!ret) {
3860 254 : GetMainSignals().BlockChecked(*pblock, state);
3861 254 : return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
3862 : }
3863 55639 : }
3864 :
3865 55385 : NotifyHeaderTip();
3866 :
3867 55385 : BlockValidationState state; // Only used to report errors, not invalidity - ignore it
3868 55385 : if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
3869 1 : return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
3870 :
3871 55384 : return true;
3872 55639 : }
3873 :
3874 21027 : bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
3875 : {
3876 21027 : AssertLockHeld(cs_main);
3877 21027 : assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
3878 21027 : CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
3879 21027 : uint256 block_hash(block.GetHash());
3880 21027 : CBlockIndex indexDummy(block);
3881 21027 : indexDummy.pprev = pindexPrev;
3882 21027 : indexDummy.nHeight = pindexPrev->nHeight + 1;
3883 21027 : indexDummy.phashBlock = &block_hash;
3884 :
3885 : // NOTE: CheckBlockHeader is called by CheckBlock
3886 21027 : if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
3887 3 : return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
3888 21024 : if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
3889 5 : return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
3890 21019 : if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
3891 1 : return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
3892 21018 : if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
3893 5 : return false;
3894 21013 : assert(state.IsValid());
3895 :
3896 21013 : return true;
3897 21027 : }
3898 :
3899 : /**
3900 : * BLOCK PRUNING CODE
3901 : */
3902 :
3903 : /* Calculate the amount of disk space the block & undo files currently use */
3904 363 : uint64_t CalculateCurrentUsage()
3905 : {
3906 363 : LOCK(cs_LastBlockFile);
3907 :
3908 : uint64_t retval = 0;
3909 726 : for (const CBlockFileInfo &file : vinfoBlockFile) {
3910 363 : retval += file.nSize + file.nUndoSize;
3911 : }
3912 : return retval;
3913 363 : }
3914 :
3915 3 : void ChainstateManager::PruneOneBlockFile(const int fileNumber)
3916 : {
3917 3 : AssertLockHeld(cs_main);
3918 3 : LOCK(cs_LastBlockFile);
3919 :
3920 309 : for (const auto& entry : m_blockman.m_block_index) {
3921 306 : CBlockIndex* pindex = entry.second;
3922 306 : if (pindex->nFile == fileNumber) {
3923 203 : pindex->nStatus &= ~BLOCK_HAVE_DATA;
3924 203 : pindex->nStatus &= ~BLOCK_HAVE_UNDO;
3925 203 : pindex->nFile = 0;
3926 203 : pindex->nDataPos = 0;
3927 203 : pindex->nUndoPos = 0;
3928 203 : setDirtyBlockIndex.insert(pindex);
3929 :
3930 : // Prune from m_blocks_unlinked -- any block we prune would have
3931 : // to be downloaded again in order to consider its chain, at which
3932 : // point it would be considered as a candidate for
3933 : // m_blocks_unlinked or setBlockIndexCandidates.
3934 203 : auto range = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
3935 203 : while (range.first != range.second) {
3936 0 : std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
3937 0 : range.first++;
3938 0 : if (_it->second == pindex) {
3939 0 : m_blockman.m_blocks_unlinked.erase(_it);
3940 0 : }
3941 0 : }
3942 203 : }
3943 306 : }
3944 :
3945 3 : vinfoBlockFile[fileNumber].SetNull();
3946 3 : setDirtyFileInfo.insert(fileNumber);
3947 3 : }
3948 :
3949 :
3950 3 : void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
3951 : {
3952 6 : for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
3953 3 : FlatFilePos pos(*it, 0);
3954 3 : fs::remove(BlockFileSeq().FileName(pos));
3955 3 : fs::remove(UndoFileSeq().FileName(pos));
3956 3 : LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
3957 3 : }
3958 3 : }
3959 :
3960 : /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
3961 0 : static void FindFilesToPruneManual(ChainstateManager& chainman, std::set<int>& setFilesToPrune, int nManualPruneHeight)
3962 : {
3963 0 : assert(fPruneMode && nManualPruneHeight > 0);
3964 :
3965 0 : LOCK2(cs_main, cs_LastBlockFile);
3966 0 : if (::ChainActive().Tip() == nullptr)
3967 0 : return;
3968 :
3969 : // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
3970 0 : unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
3971 0 : int count=0;
3972 0 : for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
3973 0 : if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
3974 : continue;
3975 0 : chainman.PruneOneBlockFile(fileNumber);
3976 0 : setFilesToPrune.insert(fileNumber);
3977 0 : count++;
3978 0 : }
3979 0 : LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
3980 0 : }
3981 :
3982 : /* This function is called from the RPC code for pruneblockchain */
3983 0 : void PruneBlockFilesManual(int nManualPruneHeight)
3984 : {
3985 0 : BlockValidationState state;
3986 0 : const CChainParams& chainparams = Params();
3987 0 : if (!::ChainstateActive().FlushStateToDisk(
3988 : chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
3989 0 : LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
3990 0 : }
3991 0 : }
3992 :
3993 : /**
3994 : * Prune block and undo files (blk???.dat and undo???.dat) so that the disk space used is less than a user-defined target.
3995 : * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
3996 : * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
3997 : * (which in this case means the blockchain must be re-downloaded.)
3998 : *
3999 : * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
4000 : * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
4001 : * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
4002 : * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
4003 : * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
4004 : * A db flag records the fact that at least some block files have been pruned.
4005 : *
4006 : * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned
4007 : */
4008 6 : static void FindFilesToPrune(ChainstateManager& chainman, std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
4009 : {
4010 6 : LOCK2(cs_main, cs_LastBlockFile);
4011 6 : if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
4012 1 : return;
4013 : }
4014 5 : if ((uint64_t)::ChainActive().Tip()->nHeight <= nPruneAfterHeight) {
4015 5 : return;
4016 : }
4017 :
4018 0 : unsigned int nLastBlockWeCanPrune = ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
4019 0 : uint64_t nCurrentUsage = CalculateCurrentUsage();
4020 : // We don't check to prune until after we've allocated new space for files
4021 : // So we should leave a buffer under our target to account for another allocation
4022 : // before the next pruning.
4023 : uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
4024 : uint64_t nBytesToPrune;
4025 0 : int count=0;
4026 :
4027 0 : if (nCurrentUsage + nBuffer >= nPruneTarget) {
4028 : // On a prune event, the chainstate DB is flushed.
4029 : // To avoid excessive prune events negating the benefit of high dbcache
4030 : // values, we should not prune too rapidly.
4031 : // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
4032 0 : if (::ChainstateActive().IsInitialBlockDownload()) {
4033 : // Since this is only relevant during IBD, we use a fixed 10%
4034 0 : nBuffer += nPruneTarget / 10;
4035 0 : }
4036 :
4037 0 : for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4038 0 : nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
4039 :
4040 0 : if (vinfoBlockFile[fileNumber].nSize == 0)
4041 : continue;
4042 :
4043 0 : if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
4044 0 : break;
4045 :
4046 : // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
4047 0 : if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
4048 : continue;
4049 :
4050 0 : chainman.PruneOneBlockFile(fileNumber);
4051 : // Queue up the files for removal
4052 0 : setFilesToPrune.insert(fileNumber);
4053 0 : nCurrentUsage -= nBytesToPrune;
4054 0 : count++;
4055 0 : }
4056 0 : }
4057 :
4058 0 : LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
4059 : nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
4060 : ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
4061 : nLastBlockWeCanPrune, count);
4062 6 : }
4063 :
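// Editorial sketch (not part of validation.cpp) of the trigger condition used in
// FindFilesToPrune above: pruning only starts once current usage plus a
// one-allocation buffer (one blk chunk plus one rev chunk) would reach the prune
// target, and during initial block download the buffer is widened by 10% of the
// target so prune events do not fire too often. The helper name is illustrative.
static bool ShouldPruneNow(uint64_t current_usage, uint64_t prune_target, bool in_ibd)
{
    uint64_t buffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    if (in_ibd) buffer += prune_target / 10;
    return current_usage + buffer >= prune_target;
}
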
4064 194682 : static FlatFileSeq BlockFileSeq()
4065 : {
4066 194682 : return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
4067 0 : }
4068 :
4069 102462 : static FlatFileSeq UndoFileSeq()
4070 : {
4071 102462 : return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
4072 0 : }
4073 :
4074 149205 : FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
4075 149205 : return BlockFileSeq().Open(pos, fReadOnly);
4076 0 : }
4077 :
4078 : /** Open an undo file (rev?????.dat) */
4079 56187 : static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
4080 56187 : return UndoFileSeq().Open(pos, fReadOnly);
4081 0 : }
4082 :
4083 16 : fs::path GetBlockPosFilename(const FlatFilePos &pos)
4084 : {
4085 16 : return BlockFileSeq().FileName(pos);
4086 0 : }
4087 :
4088 100692 : CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
4089 : {
4090 100692 : AssertLockHeld(cs_main);
4091 :
4092 100692 : if (hash.IsNull())
4093 307 : return nullptr;
4094 :
4095 : // Return existing
4096 100385 : BlockMap::iterator mi = m_block_index.find(hash);
4097 100385 : if (mi != m_block_index.end())
4098 50039 : return (*mi).second;
4099 :
4100 : // Create new
4101 50346 : CBlockIndex* pindexNew = new CBlockIndex();
4102 50346 : mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
4103 50346 : pindexNew->phashBlock = &((*mi).first);
4104 :
4105 50346 : return pindexNew;
4106 100692 : }
4107 :
4108 490 : bool BlockManager::LoadBlockIndex(
4109 : const Consensus::Params& consensus_params,
4110 : CBlockTreeDB& blocktree,
4111 : std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
4112 : {
4113 101182 : if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
4114 0 : return false;
4115 :
4116 : // Calculate nChainWork
4117 490 : std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
4118 490 : vSortedByHeight.reserve(m_block_index.size());
4119 50836 : for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
4120 : {
4121 50346 : CBlockIndex* pindex = item.second;
4122 50346 : vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
4123 50346 : }
4124 490 : sort(vSortedByHeight.begin(), vSortedByHeight.end());
4125 50836 : for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
4126 : {
4127 50346 : if (ShutdownRequested()) return false;
4128 50346 : CBlockIndex* pindex = item.second;
4129 50346 : pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
4130 50346 : pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
4131 : // We can link the chain of blocks for which we've received transactions at some point.
4132 : // Pruned nodes may have deleted the block.
4133 50346 : if (pindex->nTx > 0) {
4134 49800 : if (pindex->pprev) {
4135 49493 : if (pindex->pprev->HaveTxsDownloaded()) {
4136 49493 : pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
4137 49493 : } else {
4138 0 : pindex->nChainTx = 0;
4139 0 : m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
4140 : }
4141 : } else {
4142 307 : pindex->nChainTx = pindex->nTx;
4143 : }
4144 : }
4145 50346 : if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
4146 0 : pindex->nStatus |= BLOCK_FAILED_CHILD;
4147 0 : setDirtyBlockIndex.insert(pindex);
4148 0 : }
4149 50346 : if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
4150 49795 : block_index_candidates.insert(pindex);
4151 49795 : }
4152 50346 : if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
4153 5 : pindexBestInvalid = pindex;
4154 50346 : if (pindex->pprev)
4155 50039 : pindex->BuildSkip();
4156 50346 : if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
4157 50336 : pindexBestHeader = pindex;
4158 50346 : }
4159 :
4160 490 : return true;
4161 490 : }
4162 :
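// Editorial sketch (not part of validation.cpp): the per-index accumulation in the
// loop above is equivalent to summing GetBlockProof() over a block and all of its
// ancestors; sorting by height first guarantees pprev->nChainWork is already set
// when each child is visited. The helper name is illustrative; assumes
// <arith_uint256.h> and <chain.h>.
static arith_uint256 ChainWorkFromGenesis(const CBlockIndex* pindex)
{
    arith_uint256 total = 0;
    for (const CBlockIndex* walk = pindex; walk != nullptr; walk = walk->pprev) {
        total += GetBlockProof(*walk);
    }
    return total;
}
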
4163 599 : void BlockManager::Unload() {
4164 599 : m_failed_blocks.clear();
4165 599 : m_blocks_unlinked.clear();
4166 :
4167 4976 : for (const BlockMap::value_type& entry : m_block_index) {
4168 4377 : delete entry.second;
4169 : }
4170 :
4171 599 : m_block_index.clear();
4172 599 : }
4173 :
4174 490 : bool static LoadBlockIndexDB(ChainstateManager& chainman, const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
4175 : {
4176 490 : if (!chainman.m_blockman.LoadBlockIndex(
4177 490 : chainparams.GetConsensus(), *pblocktree,
4178 490 : ::ChainstateActive().setBlockIndexCandidates)) {
4179 0 : return false;
4180 : }
4181 :
4182 : // Load block file info
4183 490 : pblocktree->ReadLastBlockFile(nLastBlockFile);
4184 490 : vinfoBlockFile.resize(nLastBlockFile + 1);
4185 490 : LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4186 980 : for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4187 490 : pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4188 : }
4189 490 : LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
4190 490 : for (int nFile = nLastBlockFile + 1; true; nFile++) {
4191 490 : CBlockFileInfo info;
4192 490 : if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4193 0 : vinfoBlockFile.push_back(info);
4194 : } else {
4195 490 : break;
4196 : }
4197 490 : }
4198 :
4199 : // Check presence of blk files
4200 490 : LogPrintf("Checking all blk files are present...\n");
4201 490 : std::set<int> setBlkDataFiles;
4202 50836 : for (const std::pair<const uint256, CBlockIndex*>& item : chainman.BlockIndex()) {
4203 50346 : CBlockIndex* pindex = item.second;
4204 50346 : if (pindex->nStatus & BLOCK_HAVE_DATA) {
4205 49800 : setBlkDataFiles.insert(pindex->nFile);
4206 49800 : }
4207 0 : }
4208 797 : for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
4209 : {
4210 307 : FlatFilePos pos(*it, 0);
4211 307 : if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
4212 0 : return false;
4213 : }
4214 307 : }
4215 :
4216 : // Check whether we have ever pruned block & undo files
4217 490 : pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4218 490 : if (fHavePruned)
4219 0 : LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
4220 :
4221 : // Check whether we need to continue reindexing
4222 490 : bool fReindexing = false;
4223 490 : pblocktree->ReadReindexing(fReindexing);
4224 490 : if(fReindexing) fReindex = true;
4225 :
4226 : return true;
4227 490 : }
4228 :
4229 495 : void CChainState::LoadMempool(const ArgsManager& args)
4230 : {
4231 495 : if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
4232 490 : ::LoadMempool(m_mempool);
4233 490 : }
4234 495 : m_mempool.SetIsLoaded(!ShutdownRequested());
4235 495 : }
4236 :
4237 305 : bool CChainState::LoadChainTip(const CChainParams& chainparams)
4238 : {
4239 305 : AssertLockHeld(cs_main);
4240 305 : const CCoinsViewCache& coins_cache = CoinsTip();
4241 305 : assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
4242 305 : const CBlockIndex* tip = m_chain.Tip();
4243 :
4244 305 : if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4245 0 : return true;
4246 : }
4247 :
4248 : // Load pointer to end of best chain
4249 305 : CBlockIndex* pindex = LookupBlockIndex(coins_cache.GetBestBlock());
4250 305 : if (!pindex) {
4251 0 : return false;
4252 : }
4253 305 : m_chain.SetTip(pindex);
4254 305 : PruneBlockIndexCandidates();
4255 :
4256 305 : tip = m_chain.Tip();
4257 305 : LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4258 305 : tip->GetBlockHash().ToString(),
4259 305 : m_chain.Height(),
4260 305 : FormatISO8601DateTime(tip->GetBlockTime()),
4261 305 : GuessVerificationProgress(chainparams.TxData(), tip));
4262 305 : return true;
4263 305 : }
4264 :
4265 612 : CVerifyDB::CVerifyDB()
4266 306 : {
4267 306 : uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4268 612 : }
4269 :
4270 612 : CVerifyDB::~CVerifyDB()
4271 306 : {
4272 306 : uiInterface.ShowProgress("", 100, false);
4273 612 : }
4274 :
4275 306 : bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
4276 : {
4277 306 : LOCK(cs_main);
4278 306 : if (::ChainActive().Tip() == nullptr || ::ChainActive().Tip()->pprev == nullptr)
4279 41 : return true;
4280 :
4281 : // Verify blocks in the best chain
4282 265 : if (nCheckDepth <= 0 || nCheckDepth > ::ChainActive().Height())
4283 12 : nCheckDepth = ::ChainActive().Height();
4284 265 : nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4285 265 : LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
4286 265 : CCoinsViewCache coins(coinsview);
4287 : CBlockIndex* pindex;
4288 5735 : CBlockIndex* pindexFailure = nullptr;
4289 265 : int nGoodTransactions = 0;
4290 265 : BlockValidationState state;
4291 : int reportDone = 0;
4292 265 : LogPrintf("[0%%]..."); /* Continued */
4293 2000 : for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
4294 1989 : const int percentageDone = std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
4295 1989 : if (reportDone < percentageDone/10) {
4296 : // report every 10% step
4297 1522 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
4298 1522 : reportDone = percentageDone/10;
4299 1522 : }
4300 1989 : uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
4301 1989 : if (pindex->nHeight <= ::ChainActive().Height()-nCheckDepth)
4302 253 : break;
4303 1736 : if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4304 : // If pruning, only go back as far as we have data.
4305 0 : LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
4306 0 : break;
4307 : }
4308 1736 : CBlock block;
4309 : // check level 0: read from disk
4310 1736 : if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
4311 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4312 : // check level 1: verify block validity
4313 1736 : if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
4314 0 : return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
4315 0 : pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4316 : // check level 2: verify undo validity
4317 1736 : if (nCheckLevel >= 2 && pindex) {
4318 1736 : CBlockUndo undo;
4319 1736 : if (!pindex->GetUndoPos().IsNull()) {
4320 1736 : if (!UndoReadFromDisk(undo, pindex)) {
4321 1 : return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4322 : }
4323 : }
4324 1736 : }
4325 : // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
4326 1735 : if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= ::ChainstateActive().m_coinstip_cache_size_bytes) {
4327 1735 : assert(coins.GetBestBlock() == pindex->GetBlockHash());
4328 1735 : DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins);
4329 1735 : if (res == DISCONNECT_FAILED) {
4330 0 : return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4331 : }
4332 1735 : if (res == DISCONNECT_UNCLEAN) {
4333 0 : nGoodTransactions = 0;
4334 : pindexFailure = pindex;
4335 0 : } else {
4336 1735 : nGoodTransactions += block.vtx.size();
4337 : }
4338 1735 : }
4339 1735 : if (ShutdownRequested()) return true;
4340 1989 : }
4341 264 : if (pindexFailure)
4342 0 : return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", ::ChainActive().Height() - pindexFailure->nHeight + 1, nGoodTransactions);
4343 :
4344 : // store block count as we move pindex at check level >= 4
4345 264 : int block_count = ::ChainActive().Height() - pindex->nHeight;
4346 :
4347 : // check level 4: try reconnecting blocks
4348 264 : if (nCheckLevel >= 4) {
4349 208 : while (pindex != ::ChainActive().Tip()) {
4350 207 : const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
4351 207 : if (reportDone < percentageDone/10) {
4352 : // report every 10% step
4353 5 : LogPrintf("[%d%%]...", percentageDone); /* Continued */
4354 5 : reportDone = percentageDone/10;
4355 5 : }
4356 207 : uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
4357 207 : pindex = ::ChainActive().Next(pindex);
4358 207 : CBlock block;
4359 207 : if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
4360 0 : return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4361 207 : if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, chainparams))
4362 0 : return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4363 207 : if (ShutdownRequested()) return true;
4364 207 : }
4365 : }
4366 :
4367 264 : LogPrintf("[DONE].\n");
4368 264 : LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
4369 :
4370 264 : return true;
4371 306 : }
4372 :
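// Usage sketch (editorial, not part of validation.cpp): a caller drives the
// verification loop above roughly like this, with the level and depth normally
// taken from -checklevel and -checkblocks; the values and helper name here are
// illustrative only.
static bool VerifyRecentBlocks(const CChainParams& chainparams)
{
    // Level 3 performs memory-only disconnects of tip blocks; a depth of 288 covers
    // roughly the last two days of blocks at the 10-minute target spacing.
    return CVerifyDB().VerifyDB(chainparams, &::ChainstateActive().CoinsDB(), /*nCheckLevel=*/3, /*nCheckDepth=*/288);
}
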
4373 : /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
4374 0 : bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
4375 : {
4376 : // TODO: merge with ConnectBlock
4377 0 : CBlock block;
4378 0 : if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
4379 0 : return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4380 : }
4381 :
4382 0 : for (const CTransactionRef& tx : block.vtx) {
4383 0 : if (!tx->IsCoinBase()) {
4384 0 : for (const CTxIn &txin : tx->vin) {
4385 0 : inputs.SpendCoin(txin.prevout);
4386 : }
4387 0 : }
4388 : // Pass check = true as every addition may be an overwrite.
4389 0 : AddCoins(inputs, *tx, pindex->nHeight, true);
4390 : }
4391 0 : return true;
4392 0 : }
4393 :
4394 498 : bool CChainState::ReplayBlocks(const CChainParams& params)
4395 : {
4396 498 : LOCK(cs_main);
4397 :
4398 498 : CCoinsView& db = this->CoinsDB();
4399 498 : CCoinsViewCache cache(&db);
4400 :
4401 498 : std::vector<uint256> hashHeads = db.GetHeadBlocks();
4402 498 : if (hashHeads.empty()) return true; // We're already in a consistent state.
4403 0 : if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
4404 :
4405 0 : uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
4406 0 : LogPrintf("Replaying blocks\n");
4407 :
4408 : const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
4409 : const CBlockIndex* pindexNew; // New tip during the interrupted flush.
4410 : const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
4411 :
4412 0 : if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4413 0 : return error("ReplayBlocks(): reorganization to unknown block requested");
4414 : }
4415 0 : pindexNew = m_blockman.m_block_index[hashHeads[0]];
4416 :
4417 0 : if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
4418 0 : if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4419 0 : return error("ReplayBlocks(): reorganization from unknown block requested");
4420 : }
4421 0 : pindexOld = m_blockman.m_block_index[hashHeads[1]];
4422 0 : pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4423 0 : assert(pindexFork != nullptr);
4424 : }
4425 :
4426 : // Rollback along the old branch.
4427 0 : while (pindexOld != pindexFork) {
4428 0 : if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
4429 0 : CBlock block;
4430 0 : if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
4431 0 : return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4432 : }
4433 0 : LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4434 0 : DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4435 0 : if (res == DISCONNECT_FAILED) {
4436 0 : return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4437 : }
4438 : // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
4439 : // overwritten. It corresponds to cases where the block-to-be-disconnected never had all its operations
4440 : // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
4441 : // the result is still a version of the UTXO set with the effects of that block undone.
4442 0 : }
4443 0 : pindexOld = pindexOld->pprev;
4444 : }
4445 :
4446 : // Roll forward from the forking point to the new tip.
4447 0 : int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4448 0 : for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
4449 0 : const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
4450 0 : LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
4451 0 : uiInterface.ShowProgress(_("Replaying blocks...").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
4452 0 : if (!RollforwardBlock(pindex, cache, params)) return false;
4453 0 : }
4454 :
4455 0 : cache.SetBestBlock(pindexNew->GetBlockHash());
4456 0 : cache.Flush();
4457 0 : uiInterface.ShowProgress("", 100, false);
4458 0 : return true;
4459 498 : }
4460 :
4461 : //! Helper for CChainState::RewindBlockIndex
4462 147 : void CChainState::EraseBlockData(CBlockIndex* index)
4463 : {
4464 147 : AssertLockHeld(cs_main);
4465 147 : assert(!m_chain.Contains(index)); // Make sure this block isn't active
4466 :
4467 : // Reduce validity
4468 147 : index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
4469 : // Remove have-data flags.
4470 147 : index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
4471 : // Remove storage location.
4472 147 : index->nFile = 0;
4473 147 : index->nDataPos = 0;
4474 147 : index->nUndoPos = 0;
4475 : // Remove various other things
4476 147 : index->nTx = 0;
4477 147 : index->nChainTx = 0;
4478 147 : index->nSequenceId = 0;
4479 : // Make sure it gets written.
4480 147 : setDirtyBlockIndex.insert(index);
4481 : // Update indexes
4482 147 : setBlockIndexCandidates.erase(index);
4483 147 : auto ret = m_blockman.m_blocks_unlinked.equal_range(index->pprev);
4484 147 : while (ret.first != ret.second) {
4485 0 : if (ret.first->second == index) {
4486 0 : m_blockman.m_blocks_unlinked.erase(ret.first++);
4487 0 : } else {
4488 0 : ++ret.first;
4489 : }
4490 : }
4491 : // Mark parent as eligible for main chain again
4492 147 : if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
4493 147 : setBlockIndexCandidates.insert(index->pprev);
4494 147 : }
4495 147 : }
4496 :
4497 490 : bool CChainState::RewindBlockIndex(const CChainParams& params)
4498 : {
4499 : // Note that during -reindex-chainstate we are called with an empty m_chain!
4500 :
4501 : // First erase all post-segwit blocks without witness not in the main chain,
4502 : // as this can we done without costly DisconnectTip calls. Active
4503 : // blocks will be dealt with below (releasing cs_main in between).
4504 : {
4505 490 : LOCK(cs_main);
4506 51019 : for (const auto& entry : m_blockman.m_block_index) {
4507 50529 : if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
4508 0 : EraseBlockData(entry.second);
4509 : }
4510 0 : }
4511 490 : }
4512 :
4513 : // Find what height we need to reorganize to.
4514 : CBlockIndex *tip;
4515 : int nHeight = 1;
4516 : {
4517 490 : LOCK(cs_main);
4518 49807 : while (nHeight <= m_chain.Height()) {
4519 : // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
4520 : // blocks in ConnectBlock, we don't need to go back and
4521 : // re-download/re-verify blocks from before segwit actually activated.
4522 49318 : if (IsWitnessEnabled(m_chain[nHeight - 1], params.GetConsensus()) && !(m_chain[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
4523 : break;
4524 : }
4525 49317 : nHeight++;
4526 : }
4527 :
4528 637 : tip = m_chain.Tip();
4529 490 : }
4530 : // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
4531 :
4532 490 : BlockValidationState state;
4533 : // Loop until the tip is below nHeight, or we reach a pruned block.
4534 637 : while (!ShutdownRequested()) {
4535 : {
4536 637 : LOCK(cs_main);
4537 637 : LOCK(m_mempool.cs);
4538 : // Make sure nothing changed from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
4539 637 : assert(tip == m_chain.Tip());
4540 637 : if (tip == nullptr || tip->nHeight < nHeight) break;
4541 147 : if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
4542 : // If pruning, don't try rewinding past the HAVE_DATA point;
4543 : // since older blocks can't be served anyway, there's
4544 : // no need to walk further, and trying to DisconnectTip()
4545 : // will fail (and require a needless reindex/redownload
4546 : // of the blockchain).
4547 0 : break;
4548 : }
4549 :
4550 : // Disconnect block
4551 147 : if (!DisconnectTip(state, params, nullptr)) {
4552 0 : return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, state.ToString());
4553 : }
4554 :
4555 : // Reduce validity flag and have-data flags.
4556 : // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
4557 : // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
4558 : // Note: If we encounter an insufficiently validated block that
4559 : // is on m_chain, it must be because we are a pruning node, and
4560 : // this block or some successor doesn't HAVE_DATA, so we were unable to
4561 : // rewind all the way. Blocks remaining on m_chain at this point
4562 : // must not have their validity reduced.
4563 147 : EraseBlockData(tip);
4564 :
4565 147 : tip = tip->pprev;
4566 637 : }
4567 : // Make sure the queue of validation callbacks doesn't grow unboundedly.
4568 147 : LimitValidationInterfaceQueue();
4569 :
4570 : // Occasionally flush state to disk.
4571 147 : if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
4572 0 : LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
4573 0 : return false;
4574 : }
4575 : }
4576 :
4577 : {
4578 490 : LOCK(cs_main);
4579 490 : if (m_chain.Tip() != nullptr) {
4580 : // We can't prune block index candidates based on our tip if we have
4581 : // no tip due to m_chain being empty!
4582 305 : PruneBlockIndexCandidates();
4583 :
4584 305 : CheckBlockIndex(params.GetConsensus());
4585 :
4586 : // FlushStateToDisk can possibly read ::ChainActive(). Be conservative
4587 : // and skip it here, we're about to -reindex-chainstate anyway, so
4588 : // it'll get called a bunch real soon.
4589 305 : BlockValidationState state;
4590 305 : if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
4591 0 : LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
4592 0 : return false;
4593 : }
4594 305 : }
4595 490 : }
4596 :
4597 490 : return true;
4598 490 : }
4599 :
4600 600 : void CChainState::UnloadBlockIndex() {
4601 600 : nBlockSequenceId = 1;
4602 600 : setBlockIndexCandidates.clear();
4603 600 : }
4604 :
4605 : // May NOT be used after any connections are up, as much
4606 : // of the peer-processing logic assumes a consistent
4607 : // block index state
4608 597 : void UnloadBlockIndex(CTxMemPool* mempool)
4609 : {
4610 597 : LOCK(cs_main);
4611 597 : g_chainman.Unload();
4612 597 : pindexBestInvalid = nullptr;
4613 597 : pindexBestHeader = nullptr;
4614 597 : if (mempool) mempool->clear();
4615 597 : vinfoBlockFile.clear();
4616 597 : nLastBlockFile = 0;
4617 597 : setDirtyBlockIndex.clear();
4618 597 : setDirtyFileInfo.clear();
4619 597 : versionbitscache.Clear();
4620 17910 : for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
4621 17313 : warningcache[b].clear();
4622 : }
4623 597 : fHavePruned = false;
4624 597 : }
4625 :
4626 498 : bool ChainstateManager::LoadBlockIndex(const CChainParams& chainparams)
4627 : {
4628 498 : AssertLockHeld(cs_main);
4629 : // Load block index from databases
4630 498 : bool needs_init = fReindex;
4631 498 : if (!fReindex) {
4632 490 : bool ret = LoadBlockIndexDB(*this, chainparams);
4633 490 : if (!ret) return false;
4634 490 : needs_init = m_blockman.m_block_index.empty();
4635 490 : }
4636 :
4637 498 : if (needs_init) {
4638 : // Everything here is for *new* reindex/DBs. Thus, though
4639 : // LoadBlockIndexDB may have set fReindex if we shut down
4640 : // mid-reindex previously, we don't check fReindex and
4641 : // instead only check it prior to LoadBlockIndexDB to set
4642 : // needs_init.
4643 :
4644 191 : LogPrintf("Initializing databases...\n");
4645 191 : }
4646 498 : return true;
4647 498 : }
4648 :
4649 598 : bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
4650 : {
4651 598 : LOCK(cs_main);
4652 :
4653 : // Check whether we're already initialized by checking for genesis in
4654 : // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
4655 : // set based on the coins db, not the block index db, which is the only
4656 : // thing loaded at this point.
4657 598 : if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
4658 315 : return true;
4659 :
4660 : try {
4661 283 : const CBlock& block = chainparams.GenesisBlock();
4662 283 : FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
4663 283 : if (blockPos.IsNull())
4664 0 : return error("%s: writing genesis block to disk failed", __func__);
4665 283 : CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
4666 283 : ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
4667 283 : } catch (const std::runtime_error& e) {
4668 0 : return error("%s: failed to write genesis block: %s", __func__, e.what());
4669 0 : }
4670 :
4671 283 : return true;
4672 598 : }
4673 :
4674 597 : bool LoadGenesisBlock(const CChainParams& chainparams)
4675 : {
4676 597 : return ::ChainstateActive().LoadGenesisBlock(chainparams);
4677 : }
4678 :
4679 9 : void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp)
4680 : {
4681 : // Map of disk positions for blocks with unknown parent (only used for reindex)
4682 9 : static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
4683 9 : int64_t nStart = GetTimeMillis();
4684 :
4685 9 : int nLoaded = 0;
4686 : try {
4687 : // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
4688 9 : CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
4689 9 : uint64_t nRewind = blkdat.GetPos();
4690 1447 : while (!blkdat.eof()) {
4691 1446 : if (ShutdownRequested()) return;
4692 :
4693 1446 : blkdat.SetPos(nRewind);
4694 1446 : nRewind++; // start one byte further next time, in case of failure
4695 1446 : blkdat.SetLimit(); // remove former limit
4696 1446 : unsigned int nSize = 0;
4697 : try {
4698 : // locate a header
4699 1446 : unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
4700 1446 : blkdat.FindByte(chainparams.MessageStart()[0]);
4701 1438 : nRewind = blkdat.GetPos()+1;
4702 1438 : blkdat >> buf;
4703 1438 : if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
4704 0 : continue;
4705 : // read size
4706 1438 : blkdat >> nSize;
4707 1438 : if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
4708 0 : continue;
4709 1446 : } catch (const std::exception&) {
4710 : // no valid block header found; don't complain
4711 : break;
4712 8 : }
4713 : try {
4714 : // read block
4715 1438 : uint64_t nBlockPos = blkdat.GetPos();
4716 1438 : if (dbp)
4717 1337 : dbp->nPos = nBlockPos;
4718 1438 : blkdat.SetLimit(nBlockPos + nSize);
4719 1438 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4720 1438 : CBlock& block = *pblock;
4721 1438 : blkdat >> block;
4722 2872 : nRewind = blkdat.GetPos();
4723 :
4724 1438 : uint256 hash = block.GetHash();
4725 : {
4726 1438 : LOCK(cs_main);
4727 : // detect out of order blocks, and store them for later
4728 1438 : if (hash != chainparams.GetConsensus().hashGenesisBlock && !LookupBlockIndex(block.hashPrevBlock)) {
4729 4 : LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
4730 : block.hashPrevBlock.ToString());
4731 4 : if (dbp)
4732 4 : mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
4733 4 : continue;
4734 : }
4735 :
4736 : // process in case the block isn't known yet
4737 1434 : CBlockIndex* pindex = LookupBlockIndex(hash);
4738 1434 : if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
4739 1433 : BlockValidationState state;
4740 1433 : if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
4741 1433 : nLoaded++;
4742 1433 : }
4743 1433 : if (state.IsError()) {
4744 0 : break;
4745 : }
4746 1434 : } else if (hash != chainparams.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
4747 0 : LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
4748 : }
4749 2872 : }
4750 :
4751 : // Activate the genesis block so normal node progress can continue
4752 1434 : if (hash == chainparams.GetConsensus().hashGenesisBlock) {
4753 9 : BlockValidationState state;
4754 9 : if (!ActivateBestChain(state, chainparams, nullptr)) {
4755 0 : break;
4756 : }
4757 9 : }
4758 :
4759 1434 : NotifyHeaderTip();
4760 :
4761 : // Recursively process earlier encountered successors of this block
4762 1434 : std::deque<uint256> queue;
4763 1434 : queue.push_back(hash);
4764 2872 : while (!queue.empty()) {
4765 1438 : uint256 head = queue.front();
4766 1438 : queue.pop_front();
4767 1438 : std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
4768 1442 : while (range.first != range.second) {
4769 4 : std::multimap<uint256, FlatFilePos>::iterator it = range.first;
4770 4 : std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
4771 4 : if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
4772 : {
4773 4 : LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
4774 : head.ToString());
4775 4 : LOCK(cs_main);
4776 4 : BlockValidationState dummy;
4777 4 : if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
4778 : {
4779 4 : nLoaded++;
4780 4 : queue.push_back(pblockrecursive->GetHash());
4781 4 : }
4782 4 : }
4783 4 : range.first++;
4784 4 : mapBlocksUnknownParent.erase(it);
4785 4 : NotifyHeaderTip();
4786 4 : }
4787 1438 : }
4788 1438 : } catch (const std::exception& e) {
4789 0 : LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
4790 0 : }
4791 1446 : }
4792 9 : } catch (const std::runtime_error& e) {
4793 0 : AbortNode(std::string("System error: ") + e.what());
4794 0 : }
4795 9 : LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
4796 17 : }
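// [Editor's note] Illustrative sketch, not part of validation.cpp. The loop above
// scans blk?????.dat for records framed as <4-byte network magic><4-byte LE size>
// <serialized block>, which is why it rejects sizes below 80 bytes (a bare header)
// or above MAX_BLOCK_SERIALIZED_SIZE. A minimal stand-alone reader for that
// framing, assuming the mainnet message-start bytes f9 be b4 d9:
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

static bool ReadRawBlockRecord(std::FILE* f, std::vector<unsigned char>& block_out)
{
    unsigned char magic[4];
    const unsigned char expected[4] = {0xf9, 0xbe, 0xb4, 0xd9}; // assumed mainnet magic
    if (std::fread(magic, 1, 4, f) != 4) return false;          // EOF or short read
    if (std::memcmp(magic, expected, 4) != 0) return false;     // not at a record boundary
    unsigned char size_le[4];
    if (std::fread(size_le, 1, 4, f) != 4) return false;
    const uint32_t size = uint32_t{size_le[0]} | (uint32_t{size_le[1]} << 8) |
                          (uint32_t{size_le[2]} << 16) | (uint32_t{size_le[3]} << 24);
    if (size < 80) return false;                                // smaller than a block header
    block_out.resize(size);
    return std::fread(block_out.data(), 1, size, f) == size;    // raw serialized block bytes
}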
4797 :
4798 334356 : void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
4799 : {
4800 334356 : if (!fCheckBlockIndex) {
4801 : return;
4802 : }
4803 :
4804 333802 : LOCK(cs_main);
4805 :
4806 : // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
4807 : // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
4808 : // tests when iterating the block tree require that m_chain has been initialized.)
4809 333802 : if (m_chain.Height() < 0) {
4810 16 : assert(m_blockman.m_block_index.size() <= 1);
4811 16 : return;
4812 : }
4813 :
4814 : // Build forward-pointing map of the entire block tree.
4815 333786 : std::multimap<CBlockIndex*,CBlockIndex*> forward;
4816 191898752 : for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
4817 191564966 : forward.insert(std::make_pair(entry.second->pprev, entry.second));
4818 0 : }
4819 :
4820 333786 : assert(forward.size() == m_blockman.m_block_index.size());
4821 :
4822 333786 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
4823 333786 : CBlockIndex *pindex = rangeGenesis.first->second;
4824 333786 : rangeGenesis.first++;
4825 333786 : assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
4826 :
4827 : // Iterate over the entire block tree, using depth-first search.
4828 : // Along the way, remember whether there are blocks on the path from the genesis
4829 : // block to the block being explored which are the first to have certain properties.
4830 : size_t nNodes = 0;
4831 192425231 : int nHeight = 0;
4832 : CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
4833 191898752 : CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
4834 : CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
4835 : CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
4836 : CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
4837 : CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
4838 : CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
4839 191898752 : while (pindex != nullptr) {
4840 191564966 : nNodes++;
4841 191564966 : if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
4842 191564966 : if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
4843 191564966 : if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
4844 191564966 : if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
4845 191564966 : if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
4846 191564966 : if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
4847 191564966 : if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
4848 :
4849 : // Begin: actual consistency checks.
4850 191564966 : if (pindex->pprev == nullptr) {
4851 : // Genesis block checks.
4852 333786 : assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
4853 333786 : assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
4854 : }
4855 191564966 : if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
4856 : // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
4857 : // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
4858 191564966 : if (!fHavePruned) {
4859 : // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
4860 191564966 : assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
4861 191564966 : assert(pindexFirstMissing == pindexFirstNeverProcessed);
4862 : } else {
4863 : // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
4864 0 : if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
4865 : }
4866 191564966 : if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
4867 191564966 : assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
4868 : // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
4869 191564966 : assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
4870 191564966 : assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
4871 191564966 : assert(pindex->nHeight == nHeight); // nHeight must be consistent.
4872 191564966 : assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
4873 191564966 : assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
4874 191564966 : assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
4875 191564966 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
4876 191564966 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
4877 191564966 : if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
4878 191564966 : if (pindexFirstInvalid == nullptr) {
4879 : // Checks for not-invalid blocks.
4880 191308582 : assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
4881 : }
4882 191564966 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
4883 694975 : if (pindexFirstInvalid == nullptr) {
4884 : // If this block sorts at least as well as the current tip and
4885 : // is valid and we have all data for its parents, it must be in
4886 : // setBlockIndexCandidates. m_chain.Tip() must also be there
4887 : // even if some data has been pruned.
4888 675077 : if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
4889 675077 : assert(setBlockIndexCandidates.count(pindex));
4890 : }
4891 : // If some parent is missing, then it could be that this block was in
4892 : // setBlockIndexCandidates but had to be removed because of the missing data.
4893 : // In this case it must be in m_blocks_unlinked -- see test below.
4894 : }
4895 : } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
4896 190869991 : assert(setBlockIndexCandidates.count(pindex) == 0);
4897 : }
4898 : // Check whether this block is in m_blocks_unlinked.
4899 191564966 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
4900 : bool foundInUnlinked = false;
4901 191567295 : while (rangeUnlinked.first != rangeUnlinked.second) {
4902 134561 : assert(rangeUnlinked.first->first == pindex->pprev);
4903 134561 : if (rangeUnlinked.first->second == pindex) {
4904 : foundInUnlinked = true;
4905 132232 : break;
4906 : }
4907 2329 : rangeUnlinked.first++;
4908 : }
4909 191564966 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
4910 : // If this block has block data available, some parent was never received, and it has no invalid parents, it must be in m_blocks_unlinked.
4911 132232 : assert(foundInUnlinked);
4912 : }
4913 191564966 : if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
4914 191564966 : if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
4915 191564966 : if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
4916 : // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
4917 0 : assert(fHavePruned); // We must have pruned.
4918 : // This block may have entered m_blocks_unlinked if:
4919 : // - it has a descendant that at some point had more work than the
4920 : // tip, and
4921 : // - we tried switching to that descendant but were missing
4922 : // data for some intermediate block between m_chain and the
4923 : // tip.
4924 : // So if this block is itself better than m_chain.Tip() and it wasn't in
4925 : // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
4926 0 : if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
4927 : if (pindexFirstInvalid == nullptr) {
4928 0 : assert(foundInUnlinked);
4929 : }
4930 : }
4931 : }
4932 : // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
4933 : // End: actual consistency checks.
4934 :
4935 : // Try descending into the first subnode.
4936 191564966 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
4937 191564966 : if (range.first != range.second) {
4938 : // A subnode was found.
4939 190704701 : pindex = range.first->second;
4940 190704701 : nHeight++;
4941 190704701 : continue;
4942 : }
4943 : // This is a leaf node.
4944 : // Move upwards until we reach a node of which we have not yet visited the last child.
4945 191898752 : while (pindex) {
4946 : // We are going to either move to a parent or a sibling of pindex.
4947 : // If pindex was the first with a certain property, unset the corresponding variable.
4948 191564966 : if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
4949 191564966 : if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
4950 191564966 : if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
4951 191564966 : if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
4952 191564966 : if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
4953 191564966 : if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
4954 191564966 : if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
4955 : // Find our parent.
4956 191564966 : CBlockIndex* pindexPar = pindex->pprev;
4957 : // Find which child we just visited.
4958 191564966 : std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
4959 193239527 : while (rangePar.first->second != pindex) {
4960 1674561 : assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
4961 1674561 : rangePar.first++;
4962 : }
4963 : // Proceed to the next one.
4964 191564966 : rangePar.first++;
4965 191564966 : if (rangePar.first != rangePar.second) {
4966 : // Move to the sibling.
4967 526479 : pindex = rangePar.first->second;
4968 526479 : break;
4969 : } else {
4970 : // Move up further.
4971 191038487 : pindex = pindexPar;
4972 191038487 : nHeight--;
4973 191038487 : continue;
4974 : }
4975 191564966 : }
4976 191564966 : }
4977 :
4978 : // Check that we actually traversed the entire map.
4979 333786 : assert(nNodes == forward.size());
4980 334356 : }
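// [Editor's note] Illustrative sketch, not part of validation.cpp. CheckBlockIndex
// above visits every block exactly once using only the forward (parent -> child)
// multimap: descend into the first child, then step to the next sibling, then climb
// back up. The same traversal scheme in isolation, over a hypothetical Node type
// that only records its parent (this sketch assumes a single root):
#include <cassert>
#include <cstddef>
#include <map>

struct Node { Node* parent = nullptr; };

static std::size_t CountNodesDepthFirst(const std::multimap<Node*, Node*>& forward)
{
    auto genesis = forward.equal_range(nullptr);
    assert(genesis.first != genesis.second); // at least one root with parent == nullptr
    Node* node = genesis.first->second;
    std::size_t visited = 0;
    while (node != nullptr) {
        ++visited;
        auto children = forward.equal_range(node);
        if (children.first != children.second) {
            node = children.first->second; // descend into the first child
            continue;
        }
        // Leaf: climb until a not-yet-visited sibling exists, or we pass the root.
        while (node != nullptr) {
            auto siblings = forward.equal_range(node->parent);
            while (siblings.first != siblings.second && siblings.first->second != node) ++siblings.first;
            assert(siblings.first != siblings.second); // the edge (parent, node) must exist
            ++siblings.first;
            if (siblings.first != siblings.second) {
                node = siblings.first->second; // move to the next sibling
                break;
            }
            node = node->parent; // no more siblings: move up one level
        }
    }
    return visited;
}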
4981 :
4982 1110 : std::string CChainState::ToString()
4983 : {
4984 1110 : CBlockIndex* tip = m_chain.Tip();
4985 1110 : return strprintf("Chainstate [%s] @ height %d (%s)",
4986 1110 : m_from_snapshot_blockhash.IsNull() ? "ibd" : "snapshot",
4987 1110 : tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
4988 1110 : }
4989 :
4990 5 : bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
4991 : {
4992 5 : if (coinstip_size == m_coinstip_cache_size_bytes &&
4993 0 : coinsdb_size == m_coinsdb_cache_size_bytes) {
4994 : // Cache sizes are unchanged, no need to continue.
4995 0 : return true;
4996 : }
4997 5 : size_t old_coinstip_size = m_coinstip_cache_size_bytes;
4998 5 : m_coinstip_cache_size_bytes = coinstip_size;
4999 5 : m_coinsdb_cache_size_bytes = coinsdb_size;
5000 5 : CoinsDB().ResizeCache(coinsdb_size);
5001 :
5002 5 : LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
5003 5 : this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
5004 5 : LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
5005 5 : this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
5006 :
5007 5 : BlockValidationState state;
5008 5 : const CChainParams& chainparams = Params();
5009 :
5010 : bool ret;
5011 :
5012 5 : if (coinstip_size > old_coinstip_size) {
5013 : // Likely no need to flush if cache sizes have grown.
5014 1 : ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED);
5015 1 : } else {
5016 : // Otherwise, flush state to disk and deallocate the in-memory coins map.
5017 4 : ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS);
5018 4 : CoinsTip().ReallocateCache();
5019 : }
5020 5 : return ret;
5021 5 : }
5022 :
5023 500 : std::string CBlockFileInfo::ToString() const
5024 : {
5025 500 : return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
5026 0 : }
5027 :
5028 2 : CBlockFileInfo* GetBlockFileInfo(size_t n)
5029 : {
5030 2 : LOCK(cs_LastBlockFile);
5031 :
5032 2 : return &vinfoBlockFile.at(n);
5033 2 : }
5034 :
5035 354 : ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
5036 : {
5037 354 : LOCK(cs_main);
5038 354 : return VersionBitsState(::ChainActive().Tip(), params, pos, versionbitscache);
5039 354 : }
5040 :
5041 273 : BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
5042 : {
5043 273 : LOCK(cs_main);
5044 273 : return VersionBitsStatistics(::ChainActive().Tip(), params, pos);
5045 273 : }
5046 :
5047 354 : int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
5048 : {
5049 354 : LOCK(cs_main);
5050 354 : return VersionBitsStateSinceHeight(::ChainActive().Tip(), params, pos, versionbitscache);
5051 354 : }
5052 :
5053 : static const uint64_t MEMPOOL_DUMP_VERSION = 1;
5054 :
5055 490 : bool LoadMempool(CTxMemPool& pool)
5056 : {
5057 490 : const CChainParams& chainparams = Params();
5058 490 : int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
5059 490 : FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
5060 490 : CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
5061 490 : if (file.IsNull()) {
5062 303 : LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
5063 303 : return false;
5064 : }
5065 :
5066 187 : int64_t count = 0;
5067 187 : int64_t expired = 0;
5068 187 : int64_t failed = 0;
5069 187 : int64_t already_there = 0;
5070 187 : int64_t unbroadcast = 0;
5071 187 : int64_t nNow = GetTime();
5072 :
5073 : try {
5074 187 : uint64_t version;
5075 187 : file >> version;
5076 187 : if (version != MEMPOOL_DUMP_VERSION) {
5077 0 : return false;
5078 : }
5079 187 : uint64_t num;
5080 187 : file >> num;
5081 292 : while (num--) {
5082 105 : CTransactionRef tx;
5083 105 : int64_t nTime;
5084 105 : int64_t nFeeDelta;
5085 105 : file >> tx;
5086 105 : file >> nTime;
5087 105 : file >> nFeeDelta;
5088 :
5089 105 : CAmount amountdelta = nFeeDelta;
5090 105 : if (amountdelta) {
5091 4 : pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
5092 : }
5093 105 : TxValidationState state;
5094 105 : if (nTime + nExpiryTimeout > nNow) {
5095 105 : LOCK(cs_main);
5096 105 : AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime,
5097 : nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */,
5098 : false /* test_accept */);
5099 105 : if (state.IsValid()) {
5100 96 : ++count;
5101 96 : } else {
5102 : // mempool may contain the transaction already, e.g. from
5103 : // wallet(s) having loaded it while we were processing
5104 : // mempool transactions; consider these as valid, instead of
5105 : // failed, but mark them as 'already there'
5106 9 : if (pool.exists(tx->GetHash())) {
5107 3 : ++already_there;
5108 3 : } else {
5109 6 : ++failed;
5110 : }
5111 : }
5112 105 : } else {
5113 0 : ++expired;
5114 : }
5115 105 : if (ShutdownRequested())
5116 0 : return false;
5117 105 : }
5118 187 : std::map<uint256, CAmount> mapDeltas;
5119 187 : file >> mapDeltas;
5120 :
5121 187 : for (const auto& i : mapDeltas) {
5122 0 : pool.PrioritiseTransaction(i.first, i.second);
5123 0 : }
5124 :
5125 : // TODO: remove this try/catch block in v0.22
5126 187 : std::map<uint256, uint256> unbroadcast_txids;
5127 : try {
5128 187 : file >> unbroadcast_txids;
5129 186 : unbroadcast = unbroadcast_txids.size();
5130 187 : } catch (const std::exception&) {
5131 : // mempool.dat files created prior to v0.21 will not have an
5132 : // unbroadcast set. No need to log a failure if parsing fails here.
5133 1 : }
5134 234 : for (const auto& elem : unbroadcast_txids) {
5135 : // Don't add unbroadcast transactions that didn't get back into the
5136 : // mempool.
5137 47 : const CTransactionRef& added_tx = pool.get(elem.first);
5138 47 : if (added_tx != nullptr) {
5139 41 : pool.AddUnbroadcastTx(elem.first, added_tx->GetWitnessHash());
5140 : }
5141 47 : }
5142 187 : } catch (const std::exception& e) {
5143 0 : LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
5144 : return false;
5145 0 : }
5146 :
5147 187 : LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
5148 187 : return true;
5149 491 : }
5150 :
5151 492 : bool DumpMempool(const CTxMemPool& pool)
5152 : {
5153 492 : int64_t start = GetTimeMicros();
5154 :
5155 492 : std::map<uint256, CAmount> mapDeltas;
5156 492 : std::vector<TxMempoolInfo> vinfo;
5157 492 : std::map<uint256, uint256> unbroadcast_txids;
5158 :
5159 492 : static Mutex dump_mutex;
5160 492 : LOCK(dump_mutex);
5161 :
5162 : {
5163 492 : LOCK(pool.cs);
5164 502 : for (const auto &i : pool.mapDeltas) {
5165 10 : mapDeltas[i.first] = i.second;
5166 0 : }
5167 492 : vinfo = pool.infoAll();
5168 492 : unbroadcast_txids = pool.GetUnbroadcastTxs();
5169 492 : }
5170 :
5171 492 : int64_t mid = GetTimeMicros();
5172 :
5173 : try {
5174 492 : FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
5175 492 : if (!filestr) {
5176 1 : return false;
5177 : }
5178 :
5179 491 : CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
5180 :
5181 491 : uint64_t version = MEMPOOL_DUMP_VERSION;
5182 491 : file << version;
5183 :
5184 491 : file << (uint64_t)vinfo.size();
5185 1129 : for (const auto& i : vinfo) {
5186 638 : file << *(i.tx);
5187 638 : file << int64_t{count_seconds(i.m_time)};
5188 638 : file << int64_t{i.nFeeDelta};
5189 638 : mapDeltas.erase(i.tx->GetHash());
5190 : }
5191 :
5192 491 : file << mapDeltas;
5193 :
5194 491 : LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
5195 491 : file << unbroadcast_txids;
5196 :
5197 491 : if (!FileCommit(file.Get()))
5198 0 : throw std::runtime_error("FileCommit failed");
5199 491 : file.fclose();
5200 491 : RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
5201 491 : int64_t last = GetTimeMicros();
5202 491 : LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
5203 491 : } catch (const std::exception& e) {
5204 0 : LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
5205 : return false;
5206 0 : }
5207 491 : return true;
5208 492 : }
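// [Editor's note] For reference, the mempool.dat layout written by DumpMempool and
// read back by LoadMempool above is, in serialization order:
//   uint64_t                     version              (MEMPOOL_DUMP_VERSION, currently 1)
//   uint64_t                     num                  (number of entries that follow)
//   num x { CTransaction tx; int64_t nTime; int64_t nFeeDelta; }
//   std::map<uint256, CAmount>   mapDeltas            (fee deltas for transactions not in the dump)
//   std::map<uint256, uint256>   unbroadcast_txids    (absent in files written before v0.21; on load
//                                                      the stored value is ignored and the wtxid is
//                                                      re-derived from the accepted transaction)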
5209 :
5210 : //! Guess how far we are in the verification process at the given block index
5211 : //! Requires cs_main if pindex has not been validated yet (because nChainTx might be unset)
5212 128050 : double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
5213 128050 : if (pindex == nullptr)
5214 1 : return 0.0;
5215 :
5216 128049 : int64_t nNow = time(nullptr);
5217 :
5218 : double fTxTotal;
5219 :
5220 128049 : if (pindex->nChainTx <= data.nTxCount) {
5221 186 : fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
5222 186 : } else {
5223 127863 : fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
5224 : }
5225 :
5226 128049 : return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
5227 128050 : }
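// [Editor's note] Worked example with assumed numbers: if the tip has
// nChainTx = 400,000,000, the chain params carry nTxCount = 500,000,000 and
// dTxRate = 3.0 tx/s, and the params snapshot (data.nTime) is 1,000,000 seconds
// old, then the first branch applies (400M <= 500M), so
//   fTxTotal = 500,000,000 + 1,000,000 * 3.0 = 503,000,000
// and the reported progress is 400,000,000 / 503,000,000 ~= 0.795.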
5228 :
5229 : class CMainCleanup
5230 : {
5231 : public:
5232 1280 : CMainCleanup() {}
5233 1280 : ~CMainCleanup() {
5234 : // block headers
5235 640 : BlockMap::iterator it1 = g_chainman.BlockIndex().begin();
5236 94729 : for (; it1 != g_chainman.BlockIndex().end(); it1++)
5237 94089 : delete (*it1).second;
5238 640 : g_chainman.BlockIndex().clear();
5239 1280 : }
5240 : };
5241 640 : static CMainCleanup instance_of_cmaincleanup;
5242 :
5243 0 : Optional<uint256> ChainstateManager::SnapshotBlockhash() const {
5244 0 : if (m_active_chainstate != nullptr) {
5245 : // If a snapshot chainstate exists, it will always be our active.
5246 0 : return m_active_chainstate->m_from_snapshot_blockhash;
5247 : }
5248 0 : return {};
5249 0 : }
5250 :
5251 3609 : std::vector<CChainState*> ChainstateManager::GetAll()
5252 : {
5253 3609 : std::vector<CChainState*> out;
5254 :
5255 3609 : if (!IsSnapshotValidated() && m_ibd_chainstate) {
5256 3591 : out.push_back(m_ibd_chainstate.get());
5257 3591 : }
5258 :
5259 3609 : if (m_snapshot_chainstate) {
5260 2 : out.push_back(m_snapshot_chainstate.get());
5261 2 : }
5262 :
5263 : return out;
5264 3609 : }
5265 :
5266 602 : CChainState& ChainstateManager::InitializeChainstate(CTxMemPool& mempool, const uint256& snapshot_blockhash)
5267 : {
5268 602 : bool is_snapshot = !snapshot_blockhash.IsNull();
5269 : std::unique_ptr<CChainState>& to_modify =
5270 602 : is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
5271 :
5272 602 : if (to_modify) {
5273 0 : throw std::logic_error("should not be overwriting a chainstate");
5274 : }
5275 602 : to_modify.reset(new CChainState(mempool, m_blockman, snapshot_blockhash));
5276 :
5277 : // Snapshot chainstates and initial IBD chainstates always become active.
5278 602 : if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
5279 602 : LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
5280 602 : m_active_chainstate = to_modify.get();
5281 : } else {
5282 0 : throw std::logic_error("unexpected chainstate activation");
5283 : }
5284 :
5285 602 : return *to_modify;
5286 0 : }
5287 :
5288 303958 : CChainState& ChainstateManager::ActiveChainstate() const
5289 : {
5290 303958 : assert(m_active_chainstate);
5291 303958 : return *m_active_chainstate;
5292 : }
5293 :
5294 2 : bool ChainstateManager::IsSnapshotActive() const
5295 : {
5296 2 : return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
5297 : }
5298 :
5299 4 : CChainState& ChainstateManager::ValidatedChainstate() const
5300 : {
5301 4 : if (m_snapshot_chainstate && IsSnapshotValidated()) {
5302 0 : return *m_snapshot_chainstate.get();
5303 : }
5304 4 : assert(m_ibd_chainstate);
5305 4 : return *m_ibd_chainstate.get();
5306 4 : }
5307 :
5308 3 : bool ChainstateManager::IsBackgroundIBD(CChainState* chainstate) const
5309 : {
5310 3 : return (m_snapshot_chainstate && chainstate == m_ibd_chainstate.get());
5311 : }
5312 :
5313 599 : void ChainstateManager::Unload()
5314 : {
5315 1199 : for (CChainState* chainstate : this->GetAll()) {
5316 600 : chainstate->m_chain.SetTip(nullptr);
5317 600 : chainstate->UnloadBlockIndex();
5318 : }
5319 :
5320 599 : m_blockman.Unload();
5321 599 : }
5322 :
5323 99 : void ChainstateManager::Reset()
5324 : {
5325 99 : m_ibd_chainstate.reset();
5326 99 : m_snapshot_chainstate.reset();
5327 99 : m_active_chainstate = nullptr;
5328 99 : m_snapshot_validated = false;
5329 99 : }
5330 :
5331 2 : void ChainstateManager::MaybeRebalanceCaches()
5332 : {
5333 2 : if (m_ibd_chainstate && !m_snapshot_chainstate) {
5334 1 : LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
5335 : // Allocate everything to the IBD chainstate.
5336 1 : m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
5337 1 : }
5338 1 : else if (m_snapshot_chainstate && !m_ibd_chainstate) {
5339 0 : LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
5340 : // Allocate everything to the snapshot chainstate.
5341 0 : m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
5342 0 : }
5343 1 : else if (m_ibd_chainstate && m_snapshot_chainstate) {
5344 : // If both chainstates exist, determine which needs more cache based on IBD status.
5345 : //
5346 : // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
5347 1 : if (m_snapshot_chainstate->IsInitialBlockDownload()) {
5348 2 : m_ibd_chainstate->ResizeCoinsCaches(
5349 1 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
5350 2 : m_snapshot_chainstate->ResizeCoinsCaches(
5351 1 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
5352 1 : } else {
5353 0 : m_snapshot_chainstate->ResizeCoinsCaches(
5354 0 : m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
5355 0 : m_ibd_chainstate->ResizeCoinsCaches(
5356 0 : m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
5357 : }
5358 : }
5359 2 : }
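// [Editor's note] Worked example with assumed totals: with m_total_coinstip_cache =
// 400 MiB and m_total_coinsdb_cache = 8 MiB, a snapshot chainstate still in initial
// block download gets 400 * 0.95 = 380 MiB / 8 * 0.95 = 7.6 MiB, while the background
// IBD chainstate is shrunk to 400 * 0.05 = 20 MiB / 8 * 0.05 = 0.4 MiB; the shares
// swap once the snapshot chainstate leaves IBD.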
|